[Cross-reference index for the identifier `start` (truncated). Each entry gives a line number, file path, and code context for a use of `start` in the Linux kernel sources, covering arch/alpha (cacheflush/tlbflush headers, core_* PCI chipset code, pci.c, pci_iommu.c, setup.c, mm/numa.c), arch/arc (cacheflush/tlbflush headers, kernel/unwind.c, mm/cache.c, mm/tlb.c), and arch/arm (cacheflush/tlbflush/outercache headers, kernel/setup.c, bios32.c, unwind.c, and mach-* platform code). Typical uses are resource .start fields in platform device and iomem declarations, start parameters of cache, TLB, and DMA flush routines, and PCI controller io_space/mem_space start addresses. The listing breaks off mid-entry at arch/arm/mach-iop32x/adma.c.]
IRQ_DMA0_EOT, start 38 arch/arm/mach-iop32x/adma.c .start = IRQ_DMA0_EOC, start 43 arch/arm/mach-iop32x/adma.c .start = IRQ_DMA0_ERR, start 51 arch/arm/mach-iop32x/adma.c .start = IOP3XX_DMA_PHYS_BASE(1), start 56 arch/arm/mach-iop32x/adma.c .start = IRQ_DMA1_EOT, start 61 arch/arm/mach-iop32x/adma.c .start = IRQ_DMA1_EOC, start 66 arch/arm/mach-iop32x/adma.c .start = IRQ_DMA1_ERR, start 75 arch/arm/mach-iop32x/adma.c .start = IOP3XX_AAU_PHYS_BASE, start 80 arch/arm/mach-iop32x/adma.c .start = IRQ_AA_EOT, start 85 arch/arm/mach-iop32x/adma.c .start = IRQ_AA_EOC, start 90 arch/arm/mach-iop32x/adma.c .start = IRQ_AA_ERR, start 128 arch/arm/mach-iop32x/em7210.c .start = 0xf0000000, start 163 arch/arm/mach-iop32x/em7210.c .start = IQ31244_UART, start 120 arch/arm/mach-iop32x/glantank.c .start = 0xf0000000, start 149 arch/arm/mach-iop32x/glantank.c .start = GLANTANK_UART, start 56 arch/arm/mach-iop32x/i2c.c .start = 0xfffff680, start 61 arch/arm/mach-iop32x/i2c.c .start = IRQ_IOP32X_I2C_0, start 77 arch/arm/mach-iop32x/i2c.c .start = 0xfffff6a0, start 82 arch/arm/mach-iop32x/i2c.c .start = IRQ_IOP32X_I2C_1, start 200 arch/arm/mach-iop32x/iq31244.c .start = 0xf0000000, start 229 arch/arm/mach-iop32x/iq31244.c .start = IQ31244_UART, start 126 arch/arm/mach-iop32x/iq80321.c .start = 0xf0000000, start 155 arch/arm/mach-iop32x/iq80321.c .start = IQ80321_UART, start 152 arch/arm/mach-iop32x/n2100.c .start = 0xf0000000, start 182 arch/arm/mach-iop32x/n2100.c .start = N2100_UART, start 196 arch/arm/mach-iop32x/pci.c res->start = IOP3XX_PCI_LOWER_MEM_PA; start 11 arch/arm/mach-iop32x/pmu.c .start = IRQ_IOP32X_CORE_PMU, start 75 arch/arm/mach-ixp4xx/avila-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 80 arch/arm/mach-ixp4xx/avila-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 125 arch/arm/mach-ixp4xx/avila-setup.c .start = IRQ_IXP4XX_GPIO12, start 154 arch/arm/mach-ixp4xx/avila-setup.c avila_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 162 arch/arm/mach-ixp4xx/avila-setup.c avila_pata_resources[0].start = IXP4XX_EXP_BUS_BASE(1); start 165 arch/arm/mach-ixp4xx/avila-setup.c avila_pata_resources[1].start = IXP4XX_EXP_BUS_BASE(2); start 432 arch/arm/mach-ixp4xx/common-pci.c res[0].start = 0x00000000; start 437 arch/arm/mach-ixp4xx/common-pci.c res[1].start = PCIBIOS_MIN_MEM; start 104 arch/arm/mach-ixp4xx/common.c .start = 0xc800b000, start 109 arch/arm/mach-ixp4xx/common.c .start = IRQ_IXP4XX_USB, start 117 arch/arm/mach-ixp4xx/common.c .start = IXP4XX_GPIO_BASE_PHYS, start 149 arch/arm/mach-ixp4xx/common.c .start = IXP4XX_NPEA_BASE_PHYS, start 154 arch/arm/mach-ixp4xx/common.c .start = IXP4XX_NPEB_BASE_PHYS, start 159 arch/arm/mach-ixp4xx/common.c .start = IXP4XX_NPEC_BASE_PHYS, start 175 arch/arm/mach-ixp4xx/common.c .start = IXP4XX_QMGR_BASE_PHYS, start 180 arch/arm/mach-ixp4xx/common.c .start = IRQ_IXP4XX_QM1, start 185 arch/arm/mach-ixp4xx/common.c .start = IRQ_IXP4XX_QM2, start 207 arch/arm/mach-ixp4xx/common.c .start = 0xc8011000, start 212 arch/arm/mach-ixp4xx/common.c .start = IRQ_IXP4XX_I2C, start 59 arch/arm/mach-ixp4xx/coyote-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 96 arch/arm/mach-ixp4xx/coyote-setup.c coyote_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 123 arch/arm/mach-ixp4xx/dsmg600-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 128 arch/arm/mach-ixp4xx/dsmg600-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 273 arch/arm/mach-ixp4xx/dsmg600-setup.c dsmg600_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 86 arch/arm/mach-ixp4xx/fsg-setup.c .start = 
IXP4XX_UART1_BASE_PHYS, start 91 arch/arm/mach-ixp4xx/fsg-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 197 arch/arm/mach-ixp4xx/fsg-setup.c fsg_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 53 arch/arm/mach-ixp4xx/gateway7001-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 90 arch/arm/mach-ixp4xx/gateway7001-setup.c gateway7001_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 230 arch/arm/mach-ixp4xx/goramo_mlr.c .start = IXP4XX_UART1_BASE_PHYS, start 235 arch/arm/mach-ixp4xx/goramo_mlr.c .start = IXP4XX_UART2_BASE_PHYS, start 434 arch/arm/mach-ixp4xx/goramo_mlr.c flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 82 arch/arm/mach-ixp4xx/gtwx5715-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 87 arch/arm/mach-ixp4xx/gtwx5715-setup.c .start = IRQ_IXP4XX_UART2, start 146 arch/arm/mach-ixp4xx/gtwx5715-setup.c gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 148 arch/arm/mach-ixp4xx/ixdp425-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 153 arch/arm/mach-ixp4xx/ixdp425-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 230 arch/arm/mach-ixp4xx/ixdp425-setup.c ixdp425_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 236 arch/arm/mach-ixp4xx/ixdp425-setup.c ixdp425_flash_nand_resource.start = IXP4XX_EXP_BUS_BASE(3), start 126 arch/arm/mach-ixp4xx/nas100d-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 131 arch/arm/mach-ixp4xx/nas100d-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 284 arch/arm/mach-ixp4xx/nas100d-setup.c nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 132 arch/arm/mach-ixp4xx/nslu2-setup.c .start = IRQ_IXP4XX_TIMER2, start 146 arch/arm/mach-ixp4xx/nslu2-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 151 arch/arm/mach-ixp4xx/nslu2-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 265 arch/arm/mach-ixp4xx/nslu2-setup.c nslu2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 114 arch/arm/mach-ixp4xx/omixp-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 118 arch/arm/mach-ixp4xx/omixp-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 226 arch/arm/mach-ixp4xx/omixp-setup.c omixp_flash_resources[0].start = IXP4XX_EXP_BUS_BASE(0); start 230 arch/arm/mach-ixp4xx/omixp-setup.c omixp_flash_resources[1].start = IXP4XX_EXP_BUS_BASE(2); start 67 arch/arm/mach-ixp4xx/vulcan-setup.c .start = IXP4XX_UART1_BASE_PHYS, start 72 arch/arm/mach-ixp4xx/vulcan-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 203 arch/arm/mach-ixp4xx/vulcan-setup.c vulcan_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 213 arch/arm/mach-ixp4xx/vulcan-setup.c vulcan_sram_resource.start = IXP4XX_EXP_BUS_BASE(2); start 224 arch/arm/mach-ixp4xx/vulcan-setup.c vulcan_uart_resources[2].start = IXP4XX_EXP_BUS_BASE(3); start 226 arch/arm/mach-ixp4xx/vulcan-setup.c vulcan_uart_data[2].mapbase = vulcan_uart_resources[2].start; start 240 arch/arm/mach-ixp4xx/vulcan-setup.c vulcan_max6369_resource.start = IXP4XX_EXP_BUS_BASE(5); start 54 arch/arm/mach-ixp4xx/wg302v2-setup.c .start = IXP4XX_UART2_BASE_PHYS, start 91 arch/arm/mach-ixp4xx/wg302v2-setup.c wg302v2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); start 26 arch/arm/mach-keystone/platsmp.c unsigned long start = virt_to_idmap(&secondary_startup); start 30 arch/arm/mach-keystone/platsmp.c cpu, start); start 32 arch/arm/mach-keystone/platsmp.c error = keystone_cpu_smc(KEYSTONE_MON_CPU_UP_IDX, cpu, start); start 122 arch/arm/mach-mmp/aspenite.c .start = SMC_CS1_PHYS_BASE + 0x300, start 127 arch/arm/mach-mmp/aspenite.c .start = MMP_GPIO_TO_IRQ(27), start 32 arch/arm/mach-mmp/devices.c if (desc->start != -1ul && desc->size > 0) { start 33 
arch/arm/mach-mmp/devices.c res[nres].start = desc->start; start 34 arch/arm/mach-mmp/devices.c res[nres].end = desc->start + desc->size - 1; start 40 arch/arm/mach-mmp/devices.c res[nres].start = desc->irq; start 50 arch/arm/mach-mmp/devices.c res[nres].start = desc->dma[i]; start 243 arch/arm/mach-mmp/devices.c .start = PXA168_U2O_PHYBASE, start 265 arch/arm/mach-mmp/devices.c .start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET, start 272 arch/arm/mach-mmp/devices.c .start = PXA168_U2O_PHYBASE, start 278 arch/arm/mach-mmp/devices.c .start = IRQ_PXA168_USB1, start 299 arch/arm/mach-mmp/devices.c .start = PXA168_U2O_REGBASE, start 304 arch/arm/mach-mmp/devices.c .start = IRQ_PXA168_USB1, start 327 arch/arm/mach-mmp/devices.c .start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET, start 334 arch/arm/mach-mmp/devices.c .start = PXA168_U2O_PHYBASE, start 340 arch/arm/mach-mmp/devices.c .start = IRQ_PXA168_USB1, start 15 arch/arm/mach-mmp/devices.h unsigned long start; start 26 arch/arm/mach-mmp/devices.h .start = _start, \ start 37 arch/arm/mach-mmp/devices.h .start = _start, \ start 48 arch/arm/mach-mmp/devices.h .start = _start, \ start 88 arch/arm/mach-mmp/flint.c .start = SMC_CS1_PHYS_BASE + 0x300, start 93 arch/arm/mach-mmp/flint.c .start = MMP_GPIO_TO_IRQ(155), start 159 arch/arm/mach-mmp/mmp2.c .start = 0xd4019000, start 163 arch/arm/mach-mmp/mmp2.c .start = IRQ_MMP2_GPIO, start 114 arch/arm/mach-mmp/pxa168.c .start = 0xd4019000, start 118 arch/arm/mach-mmp/pxa168.c .start = IRQ_PXA168_GPIOX, start 135 arch/arm/mach-mmp/pxa168.c .start = PXA168_U2H_REGBASE + U2x_CAPREGS_OFFSET, start 142 arch/arm/mach-mmp/pxa168.c .start = PXA168_U2H_PHYBASE, start 148 arch/arm/mach-mmp/pxa168.c .start = IRQ_PXA168_USB2, start 149 arch/arm/mach-mmp/pxa910.c .start = 0xd4019000, start 153 arch/arm/mach-mmp/pxa910.c .start = IRQ_PXA910_AP_GPIO, start 169 arch/arm/mach-mmp/pxa910.c .start = 0xd4010000, start 173 arch/arm/mach-mmp/pxa910.c .start = IRQ_PXA910_RTC_INT, start 178 arch/arm/mach-mmp/pxa910.c .start = IRQ_PXA910_RTC_ALARM, start 85 arch/arm/mach-mmp/sram.c info->sram_phys = (phys_addr_t)res->start; start 71 arch/arm/mach-mmp/tavorevb.c .start = SMC_CS1_PHYS_BASE + 0x300, start 76 arch/arm/mach-mmp/tavorevb.c .start = MMP_GPIO_TO_IRQ(80), start 116 arch/arm/mach-mmp/ttc_dkb.c .start = SMC_CS0_PHYS_BASE, start 61 arch/arm/mach-mv78xx0/pcie.c u32 start; start 64 arch/arm/mach-mv78xx0/pcie.c pcie_io_space.start = MV78XX0_PCIE_IO_PHYS_BASE(0); start 76 arch/arm/mach-mv78xx0/pcie.c start = MV78XX0_PCIE_MEM_PHYS_BASE; start 85 arch/arm/mach-mv78xx0/pcie.c pp->res.start = start; start 86 arch/arm/mach-mv78xx0/pcie.c pp->res.end = start + size_each - 1; start 87 arch/arm/mach-mv78xx0/pcie.c start += size_each; start 94 arch/arm/mach-mv78xx0/pcie.c pp->res.start, resource_size(&pp->res)); start 189 arch/arm/mach-mv78xx0/pcie.c dev->resource[i].start = 0; start 126 arch/arm/mach-mvebu/coherency.c coherency_phys_base = res.start; start 53 arch/arm/mach-mvebu/cpu-reset.c if (!request_mem_region(res.start, resource_size(&res), start 59 arch/arm/mach-mvebu/cpu-reset.c cpu_reset_base = ioremap(res.start, resource_size(&res)); start 62 arch/arm/mach-mvebu/cpu-reset.c release_mem_region(res.start, resource_size(&res)); start 31 arch/arm/mach-mvebu/kirkwood.c .start = CPU_CONTROL_PHYS, start 52 arch/arm/mach-mvebu/kirkwood.c .start = DDR_OPERATION_BASE, start 145 arch/arm/mach-mvebu/platsmp.c if (res.start != AXP_BOOTROM_BASE || start 252 arch/arm/mach-mvebu/pm.c if (!request_mem_region(res.start, resource_size(&res), 
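Most of the arch/arm/mach-* hits above are static struct resource initializers in board files, where .start and .end describe either a physical register window or an IRQ line that is handed to a platform device. The following is a minimal illustrative sketch of that pattern only; the device name, base address, and IRQ number (example_eth_device, EXAMPLE_ETH_PHYS, EXAMPLE_ETH_IRQ) are invented for illustration and do not correspond to any particular entry in this listing.

    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>

    /* Hypothetical base address and IRQ, chosen only for illustration. */
    #define EXAMPLE_ETH_PHYS	0x10000000
    #define EXAMPLE_ETH_IRQ	42

    static struct resource example_eth_resources[] = {
    	[0] = {
    		.start = EXAMPLE_ETH_PHYS,		/* first byte of the register window */
    		.end   = EXAMPLE_ETH_PHYS + SZ_4K - 1,	/* inclusive end, hence the "- 1" */
    		.flags = IORESOURCE_MEM,
    	},
    	[1] = {
    		.start = EXAMPLE_ETH_IRQ,		/* IRQ resources put the number in .start */
    		.end   = EXAMPLE_ETH_IRQ,		/* single IRQ, so .end == .start */
    		.flags = IORESOURCE_IRQ,
    	},
    };

    static struct platform_device example_eth_device = {
    	.name		= "example-eth",
    	.id		= -1,
    	.resource	= example_eth_resources,
    	.num_resources	= ARRAY_SIZE(example_eth_resources),
    };

Assignments in the listing such as smc91x_resources[1].start = gpio_to_irq(0); are the run-time variant of the same idea: the IRQ resource is filled in during board init, once the GPIO-to-IRQ mapping is known.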
start 258 arch/arm/mach-mvebu/pm.c sdram_ctrl = ioremap(res.start, resource_size(&res)); start 260 arch/arm/mach-mvebu/pm.c release_mem_region(res.start, resource_size(&res)); start 181 arch/arm/mach-mvebu/pmsu.c res.start = res.start - PMSU_BASE_OFFSET; start 182 arch/arm/mach-mvebu/pmsu.c res.end = res.start + PMSU_REG_SIZE - 1; start 185 arch/arm/mach-mvebu/pmsu.c if (!request_mem_region(res.start, resource_size(&res), start 192 arch/arm/mach-mvebu/pmsu.c pmsu_mp_phys_base = res.start; start 194 arch/arm/mach-mvebu/pmsu.c pmsu_mp_base = ioremap(res.start, resource_size(&res)); start 197 arch/arm/mach-mvebu/pmsu.c release_mem_region(res.start, resource_size(&res)); start 172 arch/arm/mach-mvebu/system-controller.c system_controller_phys_base = res.start; start 202 arch/arm/mach-omap1/ams-delta-fiq.c serio->resource[0].start = gpiod_to_irq(clk); start 203 arch/arm/mach-omap1/ams-delta-fiq.c serio->resource[0].end = serio->resource[0].start; start 173 arch/arm/mach-omap1/board-ams-delta.c .start = LATCH1_PHYS, start 211 arch/arm/mach-omap1/board-ams-delta.c .start = LATCH2_PHYS, start 329 arch/arm/mach-omap1/board-ams-delta.c .start = INT_KEYBOARD, start 500 arch/arm/mach-omap1/board-ams-delta.c .start = -EINVAL, start 109 arch/arm/mach-omap1/board-fsample.c .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */ start 114 arch/arm/mach-omap1/board-fsample.c .start = INT_7XX_MPU_EXT_NIRQ, start 168 arch/arm/mach-omap1/board-fsample.c .start = OMAP_CS0_PHYS, start 203 arch/arm/mach-omap1/board-fsample.c .start = OMAP_CS3_PHYS, start 230 arch/arm/mach-omap1/board-fsample.c .start = INT_7XX_MPUIO_KEYPAD, start 222 arch/arm/mach-omap1/board-h2.c .start = OMAP1610_ETHR_START, /* Physical */ start 243 arch/arm/mach-omap1/board-h2.c .start = INT_KEYBOARD, start 373 arch/arm/mach-omap1/board-h2.c h2_nor_resource.end = h2_nor_resource.start = omap_cs3_phys(); start 376 arch/arm/mach-omap1/board-h2.c h2_nand_resource.end = h2_nand_resource.start = OMAP_CS2B_PHYS; start 405 arch/arm/mach-omap1/board-h2.c h2_smc91x_resources[1].start = gpio_to_irq(0); start 227 arch/arm/mach-omap1/board-h3.c .start = OMAP1710_ETHR_START, /* Physical */ start 261 arch/arm/mach-omap1/board-h3.c .start = GPTIMER_REGS(0), /* Physical */ start 266 arch/arm/mach-omap1/board-h3.c .start = INT_1610_GPTIMER1, start 281 arch/arm/mach-omap1/board-h3.c .start = INT_KEYBOARD, start 401 arch/arm/mach-omap1/board-h3.c nor_resource.end = nor_resource.start = omap_cs3_phys(); start 404 arch/arm/mach-omap1/board-h3.c nand_resource.end = nand_resource.start = OMAP_CS2B_PHYS; start 430 arch/arm/mach-omap1/board-h3.c smc91x_resources[1].start = gpio_to_irq(40); start 232 arch/arm/mach-omap1/board-htcherald.c .start = INT_7XX_MPUIO_KEYPAD, start 559 arch/arm/mach-omap1/board-htcherald.c htcpld_resources[0].start = gpio_to_irq(HTCHERALD_GIRQ_BTNS); start 104 arch/arm/mach-omap1/board-innovator.c .start = OMAP_CS0_PHYS, start 121 arch/arm/mach-omap1/board-innovator.c .start = INT_KEYBOARD, start 173 arch/arm/mach-omap1/board-innovator.c .start = OMAP1510_FPGA_ETHR_START, /* Physical */ start 178 arch/arm/mach-omap1/board-innovator.c .start = OMAP1510_INT_ETHER, start 242 arch/arm/mach-omap1/board-innovator.c .start = INNOVATOR1610_ETHR_START, /* Physical */ start 406 arch/arm/mach-omap1/board-innovator.c innovator1610_smc91x_resources[1].start = gpio_to_irq(0); start 58 arch/arm/mach-omap1/board-nokia770.c .start = INT_KEYBOARD, start 129 arch/arm/mach-omap1/board-osk.c .start = OMAP_OSK_ETHR_START, /* Physical */ start 355 
arch/arm/mach-omap1/board-osk.c .start = INT_KEYBOARD, start 572 arch/arm/mach-omap1/board-osk.c osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys(); start 574 arch/arm/mach-omap1/board-osk.c osk5912_smc91x_resources[1].start = gpio_to_irq(0); start 576 arch/arm/mach-omap1/board-osk.c osk5912_cf_resources[0].start = gpio_to_irq(62); start 89 arch/arm/mach-omap1/board-palmte.c .start = INT_KEYBOARD, start 134 arch/arm/mach-omap1/board-palmte.c .start = OMAP_CS0_PHYS, start 113 arch/arm/mach-omap1/board-palmtt.c .start = OMAP_CS0_PHYS, start 130 arch/arm/mach-omap1/board-palmtt.c .start = INT_KEYBOARD, start 86 arch/arm/mach-omap1/board-palmz71.c .start = INT_KEYBOARD, start 131 arch/arm/mach-omap1/board-palmz71.c .start = OMAP_CS0_PHYS, start 76 arch/arm/mach-omap1/board-perseus2.c .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */ start 81 arch/arm/mach-omap1/board-perseus2.c .start = INT_7XX_MPU_EXT_NIRQ, start 126 arch/arm/mach-omap1/board-perseus2.c .start = OMAP_CS0_PHYS, start 161 arch/arm/mach-omap1/board-perseus2.c .start = OMAP_CS3_PHYS, start 188 arch/arm/mach-omap1/board-perseus2.c .start = INT_7XX_MPUIO_KEYPAD, start 198 arch/arm/mach-omap1/board-sx1.c .start = INT_KEYBOARD, start 268 arch/arm/mach-omap1/board-sx1.c .start = OMAP_CS0_PHYS, start 38 arch/arm/mach-omap1/devices.c .start = OMAP_RTC_BASE, start 43 arch/arm/mach-omap1/devices.c .start = INT_RTC_TIMER, start 47 arch/arm/mach-omap1/devices.c .start = INT_RTC_ALARM, start 146 arch/arm/mach-omap1/devices.c res[0].start = base; start 149 arch/arm/mach-omap1/devices.c res[1].start = res[1].end = irq; start 151 arch/arm/mach-omap1/devices.c res[2].start = rx_req; start 154 arch/arm/mach-omap1/devices.c res[3].start = tx_req; start 267 arch/arm/mach-omap1/devices.c .start = OMAP1_CAMERA_BASE, start 272 arch/arm/mach-omap1/devices.c .start = INT_CAMERA, start 320 arch/arm/mach-omap1/devices.c .start = OMAP_UWIRE_BASE, start 354 arch/arm/mach-omap1/devices.c .start = OMAP1_RNG_BASE, start 424 arch/arm/mach-omap1/devices.c .start = 0xfffeb000, start 81 arch/arm/mach-omap1/dma.c .start = OMAP1_DMA_BASE, start 87 arch/arm/mach-omap1/dma.c .start = INT_DMA_CH0_6, start 92 arch/arm/mach-omap1/dma.c .start = INT_DMA_CH1_7, start 97 arch/arm/mach-omap1/dma.c .start = INT_DMA_CH2_8, start 102 arch/arm/mach-omap1/dma.c .start = INT_DMA_CH3, start 107 arch/arm/mach-omap1/dma.c .start = INT_DMA_CH4, start 112 arch/arm/mach-omap1/dma.c .start = INT_DMA_CH5, start 118 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH6, start 124 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH7, start 129 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH8, start 134 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH9, start 139 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH10, start 144 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH11, start 149 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH12, start 154 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH13, start 159 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH14, start 164 arch/arm/mach-omap1/dma.c .start = INT_1610_DMA_CH15, start 169 arch/arm/mach-omap1/dma.c .start = INT_DMA_LCD, start 324 arch/arm/mach-omap1/dma.c dma_base = ioremap(res[0].start, resource_size(&res[0])); start 30 arch/arm/mach-omap1/gpio15xx.c .start = OMAP1_MPUIO_VBASE, start 35 arch/arm/mach-omap1/gpio15xx.c .start = INT_MPUIO, start 71 arch/arm/mach-omap1/gpio15xx.c .start = OMAP1510_GPIO_BASE, start 76 arch/arm/mach-omap1/gpio15xx.c .start = INT_GPIO_BANK1, start 38 
arch/arm/mach-omap1/gpio16xx.c .start = OMAP1_MPUIO_VBASE, start 43 arch/arm/mach-omap1/gpio16xx.c .start = INT_MPUIO, start 79 arch/arm/mach-omap1/gpio16xx.c .start = OMAP1610_GPIO1_BASE, start 84 arch/arm/mach-omap1/gpio16xx.c .start = INT_GPIO_BANK1, start 123 arch/arm/mach-omap1/gpio16xx.c .start = OMAP1610_GPIO2_BASE, start 128 arch/arm/mach-omap1/gpio16xx.c .start = INT_1610_GPIO_BANK2, start 151 arch/arm/mach-omap1/gpio16xx.c .start = OMAP1610_GPIO3_BASE, start 156 arch/arm/mach-omap1/gpio16xx.c .start = INT_1610_GPIO_BANK3, start 179 arch/arm/mach-omap1/gpio16xx.c .start = OMAP1610_GPIO4_BASE, start 184 arch/arm/mach-omap1/gpio16xx.c .start = INT_1610_GPIO_BANK4, start 245 arch/arm/mach-omap1/gpio16xx.c base = ioremap(res->start, resource_size(res)); start 37 arch/arm/mach-omap1/gpio7xx.c .start = OMAP1_MPUIO_VBASE, start 42 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_MPUIO, start 78 arch/arm/mach-omap1/gpio7xx.c .start = OMAP7XX_GPIO1_BASE, start 83 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_GPIO_BANK1, start 117 arch/arm/mach-omap1/gpio7xx.c .start = OMAP7XX_GPIO2_BASE, start 122 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_GPIO_BANK2, start 145 arch/arm/mach-omap1/gpio7xx.c .start = OMAP7XX_GPIO3_BASE, start 150 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_GPIO_BANK3, start 173 arch/arm/mach-omap1/gpio7xx.c .start = OMAP7XX_GPIO4_BASE, start 178 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_GPIO_BANK4, start 201 arch/arm/mach-omap1/gpio7xx.c .start = OMAP7XX_GPIO5_BASE, start 206 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_GPIO_BANK5, start 229 arch/arm/mach-omap1/gpio7xx.c .start = OMAP7XX_GPIO6_BASE, start 234 arch/arm/mach-omap1/gpio7xx.c .start = INT_7XX_GPIO_BANK6, start 51 arch/arm/mach-omap1/i2c.c res[0].start = OMAP1_I2C_BASE; start 52 arch/arm/mach-omap1/i2c.c res[0].end = res[0].start + OMAP_I2C_SIZE; start 54 arch/arm/mach-omap1/i2c.c res[1].start = INT_I2C; start 97 arch/arm/mach-omap1/mcbsp.c .start = OMAP7XX_MCBSP1_BASE, start 103 arch/arm/mach-omap1/mcbsp.c .start = INT_7XX_McBSP1RX, start 108 arch/arm/mach-omap1/mcbsp.c .start = INT_7XX_McBSP1TX, start 113 arch/arm/mach-omap1/mcbsp.c .start = 9, start 118 arch/arm/mach-omap1/mcbsp.c .start = 8, start 124 arch/arm/mach-omap1/mcbsp.c .start = OMAP7XX_MCBSP2_BASE, start 130 arch/arm/mach-omap1/mcbsp.c .start = INT_7XX_McBSP2RX, start 135 arch/arm/mach-omap1/mcbsp.c .start = INT_7XX_McBSP2TX, start 140 arch/arm/mach-omap1/mcbsp.c .start = 11, start 145 arch/arm/mach-omap1/mcbsp.c .start = 10, start 174 arch/arm/mach-omap1/mcbsp.c .start = OMAP1510_MCBSP1_BASE, start 180 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP1RX, start 185 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP1TX, start 190 arch/arm/mach-omap1/mcbsp.c .start = 9, start 195 arch/arm/mach-omap1/mcbsp.c .start = 8, start 201 arch/arm/mach-omap1/mcbsp.c .start = OMAP1510_MCBSP2_BASE, start 207 arch/arm/mach-omap1/mcbsp.c .start = INT_1510_SPI_RX, start 212 arch/arm/mach-omap1/mcbsp.c .start = INT_1510_SPI_TX, start 217 arch/arm/mach-omap1/mcbsp.c .start = 17, start 222 arch/arm/mach-omap1/mcbsp.c .start = 16, start 228 arch/arm/mach-omap1/mcbsp.c .start = OMAP1510_MCBSP3_BASE, start 234 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP3RX, start 239 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP3TX, start 244 arch/arm/mach-omap1/mcbsp.c .start = 11, start 249 arch/arm/mach-omap1/mcbsp.c .start = 10, start 281 arch/arm/mach-omap1/mcbsp.c .start = OMAP1610_MCBSP1_BASE, start 287 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP1RX, start 292 
arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP1TX, start 297 arch/arm/mach-omap1/mcbsp.c .start = 9, start 302 arch/arm/mach-omap1/mcbsp.c .start = 8, start 308 arch/arm/mach-omap1/mcbsp.c .start = OMAP1610_MCBSP2_BASE, start 314 arch/arm/mach-omap1/mcbsp.c .start = INT_1610_McBSP2_RX, start 319 arch/arm/mach-omap1/mcbsp.c .start = INT_1610_McBSP2_TX, start 324 arch/arm/mach-omap1/mcbsp.c .start = 17, start 329 arch/arm/mach-omap1/mcbsp.c .start = 16, start 335 arch/arm/mach-omap1/mcbsp.c .start = OMAP1610_MCBSP3_BASE, start 341 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP3RX, start 346 arch/arm/mach-omap1/mcbsp.c .start = INT_McBSP3TX, start 351 arch/arm/mach-omap1/mcbsp.c .start = 11, start 356 arch/arm/mach-omap1/mcbsp.c .start = 10, start 121 arch/arm/mach-omap1/timer.c res[0].start = base; start 124 arch/arm/mach-omap1/timer.c res[1].start = irq; start 161 arch/arm/mach-omap1/usb.c .start = UDC_BASE, start 165 arch/arm/mach-omap1/usb.c .start = INT_USB_IRQ_GEN, start 168 arch/arm/mach-omap1/usb.c .start = INT_USB_IRQ_NISO, start 171 arch/arm/mach-omap1/usb.c .start = INT_USB_IRQ_ISO, start 193 arch/arm/mach-omap1/usb.c udc_resources[1].start = INT_7XX_USB_GENI; start 194 arch/arm/mach-omap1/usb.c udc_resources[2].start = INT_7XX_USB_NON_ISO; start 195 arch/arm/mach-omap1/usb.c udc_resources[3].start = INT_7XX_USB_ISO; start 215 arch/arm/mach-omap1/usb.c .start = OMAP_OHCI_BASE, start 220 arch/arm/mach-omap1/usb.c .start = INT_USB_IRQ_HGEN, start 239 arch/arm/mach-omap1/usb.c ohci_resources[1].start = INT_7XX_USB_HHC_1; start 257 arch/arm/mach-omap1/usb.c .start = OTG_BASE, start 261 arch/arm/mach-omap1/usb.c .start = INT_USB_IRQ_OTG, start 276 arch/arm/mach-omap1/usb.c otg_resources[1].start = INT_7XX_USB_OTG; start 345 arch/arm/mach-omap2/cm_common.c data->mem = ioremap(res.start, resource_size(&res)); start 348 arch/arm/mach-omap2/cm_common.c mem->pa = res.start + data->offset; start 264 arch/arm/mach-omap2/dma.c dma_base = ioremap(mem->start, resource_size(mem)); start 448 arch/arm/mach-omap2/omap_device.c res[1].start = irq; start 2297 arch/arm/mach-omap2/omap_hwmod.c res->start = base; start 2349 arch/arm/mach-omap2/omap_hwmod.c va_start = ioremap(res.start, resource_size(&res)); start 758 arch/arm/mach-omap2/prm_common.c data->mem = ioremap(res.start, resource_size(&res)); start 762 arch/arm/mach-omap2/prm_common.c prm_base.pa = res.start + data->offset; start 169 arch/arm/mach-omap2/usb-tusb6010.c &tusb_resources[0].start); start 174 arch/arm/mach-omap2/usb-tusb6010.c tusb_resources[0].end = tusb_resources[0].start + 0x9ff; start 184 arch/arm/mach-omap2/usb-tusb6010.c &tusb_resources[1].start); start 189 arch/arm/mach-omap2/usb-tusb6010.c tusb_resources[1].end = tusb_resources[1].start + 0x9ff; start 203 arch/arm/mach-omap2/usb-tusb6010.c tusb_resources[2].start = gpio_to_irq(irq); start 385 arch/arm/mach-orion5x/common.c t->u.mem.start & ~PAGE_MASK)) { start 388 arch/arm/mach-orion5x/common.c t->u.mem.size / 1024, t->u.mem.start); start 81 arch/arm/mach-orion5x/db88f5281-setup.c .start = DB88F5281_NOR_BOOT_BASE, start 105 arch/arm/mach-orion5x/db88f5281-setup.c .start = DB88F5281_NOR_BASE, start 145 arch/arm/mach-orion5x/db88f5281-setup.c .start = DB88F5281_NAND_BASE, start 154 arch/arm/mach-orion5x/dns323-setup.c .start = DNS323_NOR_BOOT_BASE, start 71 arch/arm/mach-orion5x/kurobox_pro-setup.c .start = KUROBOX_PRO_NAND_BASE, start 103 arch/arm/mach-orion5x/kurobox_pro-setup.c .start = KUROBOX_PRO_NOR_BOOT_BASE, start 49 arch/arm/mach-orion5x/ls_hgl-setup.c .start = 
LS_HGL_NOR_BOOT_BASE, start 64 arch/arm/mach-orion5x/mv2120-setup.c .start = MV2120_NOR_BOOT_BASE, start 68 arch/arm/mach-orion5x/net2big-setup.c .start = NET2BIG_NOR_BOOT_BASE, start 181 arch/arm/mach-orion5x/pci.c res->start = ORION5X_PCIE_MEM_PHYS_BASE; start 182 arch/arm/mach-orion5x/pci.c res->end = res->start + ORION5X_PCIE_MEM_SIZE - 1; start 499 arch/arm/mach-orion5x/pci.c res->start = ORION5X_PCI_MEM_PHYS_BASE; start 500 arch/arm/mach-orion5x/pci.c res->end = res->start + ORION5X_PCI_MEM_SIZE - 1; start 521 arch/arm/mach-orion5x/pci.c dev->resource[i].start = 0; start 46 arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c .start = RD88F5181L_FXO_NOR_BOOT_BASE, start 47 arch/arm/mach-orion5x/rd88f5181l-ge-setup.c .start = RD88F5181L_GE_NOR_BOOT_BASE, start 66 arch/arm/mach-orion5x/rd88f5182-setup.c .start = RD88F5182_NOR_BASE, start 56 arch/arm/mach-orion5x/terastation_pro2-setup.c .start = TSP2_NOR_BOOT_BASE, start 83 arch/arm/mach-orion5x/ts209-setup.c .start = QNAP_TS209_NOR_BOOT_BASE, start 103 arch/arm/mach-orion5x/ts409-setup.c .start = QNAP_TS409_NOR_BOOT_BASE, start 80 arch/arm/mach-orion5x/wnr854t-setup.c .start = WNR854T_NOR_BOOT_BASE, start 165 arch/arm/mach-orion5x/wrt350n-v2-setup.c .start = WRT350N_V2_NOR_BOOT_BASE, start 143 arch/arm/mach-pxa/balloon3.c .start = PXA_CS0_PHYS, start 694 arch/arm/mach-pxa/balloon3.c .start = BALLOON3_NAND_BASE, start 41 arch/arm/mach-pxa/capc7117.c .start = 0x11000020, start 46 arch/arm/mach-pxa/capc7117.c .start = 0x1100001c, start 51 arch/arm/mach-pxa/capc7117.c .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO76)), start 152 arch/arm/mach-pxa/cm-x255.c .start = PXA_CS0_PHYS, start 189 arch/arm/mach-pxa/cm-x255.c .start = PXA_CS1_PHYS, start 194 arch/arm/mach-pxa/cm-x255.c .start = PXA_CS5_PHYS, start 129 arch/arm/mach-pxa/cm-x270.c .start = RTC_PHYS_BASE, start 164 arch/arm/mach-pxa/cm-x270.c .start = PXA_CS2_PHYS, start 170 arch/arm/mach-pxa/cm-x270.c .start = PXA_CS2_PHYS + 0x03fe0000, start 75 arch/arm/mach-pxa/cm-x2xx.c .start = CMX255_DM9000_PHYS_BASE, start 80 arch/arm/mach-pxa/cm-x2xx.c .start = CMX255_DM9000_PHYS_BASE + 4, start 85 arch/arm/mach-pxa/cm-x2xx.c .start = CMX255_ETHIRQ, start 93 arch/arm/mach-pxa/cm-x2xx.c .start = CMX270_DM9000_PHYS_BASE, start 98 arch/arm/mach-pxa/cm-x2xx.c .start = CMX270_DM9000_PHYS_BASE + 8, start 103 arch/arm/mach-pxa/cm-x2xx.c .start = CMX270_ETHIRQ, start 222 arch/arm/mach-pxa/cm-x300.c .start = CM_X300_ETH_PHYS, start 227 arch/arm/mach-pxa/cm-x300.c .start = CM_X300_ETH_PHYS + 0x4, start 232 arch/arm/mach-pxa/cm-x300.c .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO99)), start 866 arch/arm/mach-pxa/cm-x300.c tags->u.mem.start == 0x80000000) { start 867 arch/arm/mach-pxa/cm-x300.c tags->u.mem.start = 0xa0000000; start 180 arch/arm/mach-pxa/colibri-pxa270.c .start = PXA_CS0_PHYS, start 209 arch/arm/mach-pxa/colibri-pxa270.c .start = PXA_CS2_PHYS, start 214 arch/arm/mach-pxa/colibri-pxa270.c .start = PXA_CS2_PHYS + 4, start 219 arch/arm/mach-pxa/colibri-pxa270.c .start = PXA_GPIO_TO_IRQ(GPIO114_COLIBRI_PXA270_ETH_IRQ), start 73 arch/arm/mach-pxa/colibri-pxa300.c .start = PXA3xx_CS2_PHYS, start 78 arch/arm/mach-pxa/colibri-pxa300.c .start = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO), start 110 arch/arm/mach-pxa/colibri-pxa320.c .start = PXA3xx_CS2_PHYS, start 115 arch/arm/mach-pxa/colibri-pxa320.c .start = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO), start 152 arch/arm/mach-pxa/corgi.c .start = 0x10800000, start 278 arch/arm/mach-pxa/corgi.c .start = 0x08000000, start 641 arch/arm/mach-pxa/corgi.c .start = 
0x0C000000, start 671 arch/arm/mach-pxa/corgi.c .start = 0x00000000, start 170 arch/arm/mach-pxa/csb726.c .start = PXA_CS0_PHYS, start 187 arch/arm/mach-pxa/csb726.c .start = PXA_CS4_PHYS, start 193 arch/arm/mach-pxa/csb726.c .start = PXA_CS4_PHYS + SZ_64M - SZ_2M, start 199 arch/arm/mach-pxa/csb726.c .start = CSB726_IRQ_SM501, start 226 arch/arm/mach-pxa/csb726.c .start = PXA_CS3_PHYS, start 231 arch/arm/mach-pxa/csb726.c .start = CSB726_IRQ_LAN, start 41 arch/arm/mach-pxa/devices.c .start = IRQ_PMU, start 55 arch/arm/mach-pxa/devices.c .start = 0x41100000, start 60 arch/arm/mach-pxa/devices.c .start = IRQ_MMC, start 96 arch/arm/mach-pxa/devices.c .start = 0x40600000, start 101 arch/arm/mach-pxa/devices.c .start = IRQ_USB, start 134 arch/arm/mach-pxa/devices.c .start = 0x54100000, start 139 arch/arm/mach-pxa/devices.c .start = IRQ_USB2, start 160 arch/arm/mach-pxa/devices.c .start = 0x44000000, start 165 arch/arm/mach-pxa/devices.c .start = IRQ_LCD, start 192 arch/arm/mach-pxa/devices.c .start = 0x40100000, start 196 arch/arm/mach-pxa/devices.c .start = IRQ_FFUART, start 216 arch/arm/mach-pxa/devices.c .start = 0x40200000, start 220 arch/arm/mach-pxa/devices.c .start = IRQ_BTUART, start 240 arch/arm/mach-pxa/devices.c .start = 0x40700000, start 244 arch/arm/mach-pxa/devices.c .start = IRQ_STUART, start 264 arch/arm/mach-pxa/devices.c .start = 0x41600000, start 268 arch/arm/mach-pxa/devices.c .start = IRQ_HWUART, start 291 arch/arm/mach-pxa/devices.c .start = 0x40301680, start 295 arch/arm/mach-pxa/devices.c .start = IRQ_I2C, start 316 arch/arm/mach-pxa/devices.c .start = 0x40f00180, start 320 arch/arm/mach-pxa/devices.c .start = IRQ_PWRI2C, start 336 arch/arm/mach-pxa/devices.c .start = 0x40400000, start 340 arch/arm/mach-pxa/devices.c .start = IRQ_I2S, start 382 arch/arm/mach-pxa/devices.c .start = IRQ_STUART, start 387 arch/arm/mach-pxa/devices.c .start = IRQ_ICP, start 392 arch/arm/mach-pxa/devices.c .start = 0x40800000, start 397 arch/arm/mach-pxa/devices.c .start = 0x40700000, start 421 arch/arm/mach-pxa/devices.c .start = 0x40900000, start 426 arch/arm/mach-pxa/devices.c .start = IRQ_RTC1Hz, start 432 arch/arm/mach-pxa/devices.c .start = IRQ_RTCAlrm, start 455 arch/arm/mach-pxa/devices.c .start = 0x40500000, start 460 arch/arm/mach-pxa/devices.c .start = IRQ_AC97, start 500 arch/arm/mach-pxa/devices.c .start = 0x40b00000, start 515 arch/arm/mach-pxa/devices.c .start = 0x40c00000, start 532 arch/arm/mach-pxa/devices.c .start = 0x41000000, start 537 arch/arm/mach-pxa/devices.c .start = IRQ_SSP, start 558 arch/arm/mach-pxa/devices.c .start = 0x41400000, start 563 arch/arm/mach-pxa/devices.c .start = IRQ_NSSP, start 584 arch/arm/mach-pxa/devices.c .start = 0x41500000, start 589 arch/arm/mach-pxa/devices.c .start = IRQ_ASSP, start 611 arch/arm/mach-pxa/devices.c .start = 0x50000000, start 616 arch/arm/mach-pxa/devices.c .start = IRQ_CAMERA, start 644 arch/arm/mach-pxa/devices.c .start = 0x4C000000, start 649 arch/arm/mach-pxa/devices.c .start = IRQ_USBH1, start 675 arch/arm/mach-pxa/devices.c .start = 0x41500000, start 680 arch/arm/mach-pxa/devices.c .start = IRQ_KEYPAD, start 702 arch/arm/mach-pxa/devices.c .start = 0x41000000, start 707 arch/arm/mach-pxa/devices.c .start = IRQ_SSP, start 728 arch/arm/mach-pxa/devices.c .start = 0x41700000, start 733 arch/arm/mach-pxa/devices.c .start = IRQ_SSP2, start 754 arch/arm/mach-pxa/devices.c .start = 0x41900000, start 759 arch/arm/mach-pxa/devices.c .start = IRQ_SSP3, start 778 arch/arm/mach-pxa/devices.c .start = 0x40b00000, start 793 
arch/arm/mach-pxa/devices.c .start = 0x40c00000, start 810 arch/arm/mach-pxa/devices.c .start = 0x42000000, start 815 arch/arm/mach-pxa/devices.c .start = IRQ_MMC2, start 839 arch/arm/mach-pxa/devices.c .start = 0x42500000, start 844 arch/arm/mach-pxa/devices.c .start = IRQ_MMC3, start 868 arch/arm/mach-pxa/devices.c .start = 0x54000000, start 873 arch/arm/mach-pxa/devices.c .start = IRQ_GCU, start 897 arch/arm/mach-pxa/devices.c .start = 0x40f500c0, start 901 arch/arm/mach-pxa/devices.c .start = IRQ_PWRI2C, start 916 arch/arm/mach-pxa/devices.c .start = 0x43100000, start 921 arch/arm/mach-pxa/devices.c .start = IRQ_NAND, start 949 arch/arm/mach-pxa/devices.c .start = 0x41a00000, start 954 arch/arm/mach-pxa/devices.c .start = IRQ_SSP4, start 1013 arch/arm/mach-pxa/devices.c .start = 0x40e00000, start 1017 arch/arm/mach-pxa/devices.c .start = IRQ_GPIO0, start 1022 arch/arm/mach-pxa/devices.c .start = IRQ_GPIO1, start 1027 arch/arm/mach-pxa/devices.c .start = IRQ_GPIO_2_x, start 1085 arch/arm/mach-pxa/devices.c .start = 0x40000000, start 1090 arch/arm/mach-pxa/devices.c .start = IRQ_DMA, start 199 arch/arm/mach-pxa/em-x270.c .start = PXA_CS2_PHYS, start 204 arch/arm/mach-pxa/em-x270.c .start = PXA_CS2_PHYS + 8, start 209 arch/arm/mach-pxa/em-x270.c .start = EM_X270_ETHIRQ, start 242 arch/arm/mach-pxa/em-x270.c .start = PXA_CS4_PHYS, start 353 arch/arm/mach-pxa/em-x270.c .start = PXA_CS1_PHYS, start 428 arch/arm/mach-pxa/em-x270.c .start = PXA_CS0_PHYS, start 117 arch/arm/mach-pxa/eseries.c .start = PXA_CS4_PHYS, start 122 arch/arm/mach-pxa/eseries.c .start = PXA_GPIO_TO_IRQ(GPIO_ESERIES_TMIO_IRQ), start 416 arch/arm/mach-pxa/eseries.c .start = 0x0c000000, start 612 arch/arm/mach-pxa/eseries.c .start = 0x0c000000, start 873 arch/arm/mach-pxa/eseries.c .start = 0x0c000000, start 20 arch/arm/mach-pxa/generic.h mi->bank[__nr].start = (__start), \ start 48 arch/arm/mach-pxa/gumstix.c .start = 0x00000000, start 81 arch/arm/mach-pxa/h5000.c .start = PXA_CS0_PHYS, start 87 arch/arm/mach-pxa/h5000.c .start = PXA_CS0_PHYS + SZ_32M, start 103 arch/arm/mach-pxa/himalaya.c .start = 0x08000000, start 71 arch/arm/mach-pxa/idp.c .start = (IDP_ETH_PHYS + 0x300), start 76 arch/arm/mach-pxa/idp.c .start = PXA_GPIO_TO_IRQ(4), start 257 arch/arm/mach-pxa/irq.c pxa_irq_base = io_p2v(res.start); start 118 arch/arm/mach-pxa/littleton.c .start = (LITTLETON_ETH_PHYS + 0x300), start 123 arch/arm/mach-pxa/littleton.c .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO90)), start 185 arch/arm/mach-pxa/lpd270.c .start = LPD270_ETH_PHYS, start 190 arch/arm/mach-pxa/lpd270.c .start = LPD270_ETHERNET_IRQ, start 210 arch/arm/mach-pxa/lpd270.c .start = PXA_CS0_PHYS, start 215 arch/arm/mach-pxa/lpd270.c .start = PXA_CS1_PHYS, start 165 arch/arm/mach-pxa/lubbock.c .start = 0x10000000, start 170 arch/arm/mach-pxa/lubbock.c .start = LUBBOCK_SA1111_IRQ, start 241 arch/arm/mach-pxa/lubbock.c .start = 0x0c000c00, start 246 arch/arm/mach-pxa/lubbock.c .start = LUBBOCK_ETH_IRQ, start 252 arch/arm/mach-pxa/lubbock.c .start = 0x0e000000, start 274 arch/arm/mach-pxa/lubbock.c .start = 0x00000000, start 279 arch/arm/mach-pxa/lubbock.c .start = 0x04000000, start 337 arch/arm/mach-pxa/lubbock.c .start = LUBBOCK_FPGA_PHYS + 0xc0, start 342 arch/arm/mach-pxa/lubbock.c .start = PXA_GPIO_TO_IRQ(0), start 347 arch/arm/mach-pxa/lubbock.c .start = LUBBOCK_IRQ(0), start 191 arch/arm/mach-pxa/magician.c .start = PXA_CS3_PHYS, start 196 arch/arm/mach-pxa/magician.c .start = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ), start 456 
arch/arm/mach-pxa/magician.c .start = PXA_CS2_PHYS, start 462 arch/arm/mach-pxa/magician.c .start = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ), start 505 arch/arm/mach-pxa/magician.c .start = IRQ_MAGICIAN_VBUS, start 603 arch/arm/mach-pxa/magician.c .start = IRQ_MAGICIAN_VBUS, start 610 arch/arm/mach-pxa/magician.c .start = IRQ_MAGICIAN_VBUS, start 853 arch/arm/mach-pxa/magician.c .start = PXA_CS0_PHYS, start 127 arch/arm/mach-pxa/mainstone.c .start = (MST_ETH_PHYS + 0x300), start 132 arch/arm/mach-pxa/mainstone.c .start = MAINSTONE_IRQ(3), start 189 arch/arm/mach-pxa/mainstone.c .start = PXA_CS0_PHYS, start 194 arch/arm/mach-pxa/mainstone.c .start = PXA_CS1_PHYS, start 414 arch/arm/mach-pxa/mainstone.c .start = MST_FPGA_PHYS + 0xc0, start 419 arch/arm/mach-pxa/mainstone.c .start = PXA_GPIO_TO_IRQ(0), start 424 arch/arm/mach-pxa/mainstone.c .start = MAINSTONE_IRQ(0), start 407 arch/arm/mach-pxa/mioa701.c .start = PXA_CS0_PHYS, start 552 arch/arm/mach-pxa/mioa701.c .start = PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT), start 559 arch/arm/mach-pxa/mioa701.c .start = PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT), start 48 arch/arm/mach-pxa/mp900.c .start = 0x0d000000, start 53 arch/arm/mach-pxa/mp900.c .start = 0x0d000000 + 4, start 58 arch/arm/mach-pxa/mp900.c .start = 61, start 416 arch/arm/mach-pxa/mxm8x10.c .start = MXM_8X10_ETH_PHYS + 0x300, start 421 arch/arm/mach-pxa/mxm8x10.c .start = MXM_8X10_ETH_PHYS + 0x308, start 426 arch/arm/mach-pxa/mxm8x10.c .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO9)), start 145 arch/arm/mach-pxa/palmld.c .start = PXA_CS0_PHYS, start 408 arch/arm/mach-pxa/palmtc.c .start = PXA_CS0_PHYS, start 147 arch/arm/mach-pxa/palmtx.c .start = PXA_CS0_PHYS, start 284 arch/arm/mach-pxa/palmtx.c .start = PXA_CS1_PHYS, start 110 arch/arm/mach-pxa/pcm027.c .start = PCM027_ETH_PHYS + 0x300, start 115 arch/arm/mach-pxa/pcm027.c .start = PCM027_ETH_IRQ, start 160 arch/arm/mach-pxa/pcm027.c .start = PCM027_FLASH_PHYS, start 121 arch/arm/mach-pxa/poodle.c .start = 0x10800000, start 168 arch/arm/mach-pxa/poodle.c .start = 0x10000000, start 173 arch/arm/mach-pxa/poodle.c .start = PXA_GPIO_TO_IRQ(10), start 364 arch/arm/mach-pxa/poodle.c .start = 0x0C000000, start 394 arch/arm/mach-pxa/poodle.c .start = 0x00000000, start 304 arch/arm/mach-pxa/pxa3xx-ulpi.c r = request_mem_region(r->start, resource_size(r), pdev->name); start 311 arch/arm/mach-pxa/pxa3xx-ulpi.c u2d->mmio_base = ioremap(r->start, resource_size(r)); start 341 arch/arm/mach-pxa/pxa3xx-ulpi.c release_mem_region(r->start, resource_size(r)); start 365 arch/arm/mach-pxa/pxa3xx-ulpi.c release_mem_region(r->start, resource_size(r)); start 91 arch/arm/mach-pxa/saar.c .start = (SAAR_ETH_PHYS + 0x300), start 96 arch/arm/mach-pxa/saar.c .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO97)), start 554 arch/arm/mach-pxa/saar.c .start = SMC_CS0_PHYS_BASE, start 140 arch/arm/mach-pxa/spitz.c .start = 0x10800000, start 167 arch/arm/mach-pxa/spitz.c .start = 0x08800040, start 816 arch/arm/mach-pxa/spitz.c .start = PXA_CS3_PHYS, start 865 arch/arm/mach-pxa/spitz.c .start = PXA_CS0_PHYS, start 314 arch/arm/mach-pxa/stargate2.c .start = PXA_CS0_PHYS, start 658 arch/arm/mach-pxa/stargate2.c .start = (PXA_CS4_PHYS + 0x300), start 663 arch/arm/mach-pxa/stargate2.c .start = PXA_GPIO_TO_IRQ(40), start 764 arch/arm/mach-pxa/stargate2.c .start = PXA_CS1_PHYS, start 81 arch/arm/mach-pxa/tavorevb.c .start = (TAVOREVB_ETH_PHYS + 0x300), start 86 arch/arm/mach-pxa/tavorevb.c .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO47)), start 170 
arch/arm/mach-pxa/tosa.c .start = TOSA_CF_PHYS, start 197 arch/arm/mach-pxa/tosa.c .start = TOSA_SCOOP_PHYS + 0x40, start 389 arch/arm/mach-pxa/tosa.c .start = PXA_GPIO_TO_IRQ(TOSA_GPIO_AC_IN), start 599 arch/arm/mach-pxa/tosa.c .start = TOSA_LCDC_PHYS, start 605 arch/arm/mach-pxa/tosa.c .start = TOSA_IRQ_GPIO_TC6393XB_INT, start 843 arch/arm/mach-pxa/tosa.c .start = 0x00000000, start 182 arch/arm/mach-pxa/trizeps4.c .start = PXA_CS0_PHYS, start 202 arch/arm/mach-pxa/trizeps4.c .start = TRIZEPS4_ETH_PHYS+0x300, start 207 arch/arm/mach-pxa/trizeps4.c .start = TRIZEPS4_ETH_PHYS+0x8300, start 212 arch/arm/mach-pxa/trizeps4.c .start = TRIZEPS4_ETH_IRQ, start 425 arch/arm/mach-pxa/viper.c .start = VIPER_ETH_PHYS + 0x300, start 430 arch/arm/mach-pxa/viper.c .start = PXA_GPIO_TO_IRQ(VIPER_ETH_GPIO), start 436 arch/arm/mach-pxa/viper.c .start = VIPER_ETH_DATA_PHYS, start 498 arch/arm/mach-pxa/viper.c .start = 0x40100000, start 503 arch/arm/mach-pxa/viper.c .start = 0x40200000, start 508 arch/arm/mach-pxa/viper.c .start = 0x40700000, start 513 arch/arm/mach-pxa/viper.c .start = VIPER_UARTA_PHYS, start 518 arch/arm/mach-pxa/viper.c .start = VIPER_UARTB_PHYS, start 602 arch/arm/mach-pxa/viper.c .start = VIPER_USB_PHYS + 0, start 607 arch/arm/mach-pxa/viper.c .start = VIPER_USB_PHYS + 2, start 612 arch/arm/mach-pxa/viper.c .start = PXA_GPIO_TO_IRQ(VIPER_USB_GPIO), start 650 arch/arm/mach-pxa/viper.c .start = VIPER_FLASH_PHYS, start 655 arch/arm/mach-pxa/viper.c .start = VIPER_BOOT_PHYS, start 663 arch/arm/mach-pxa/viper.c .start = _VIPER_SRAM_BASE, start 169 arch/arm/mach-pxa/vpac270.c .start = PXA_CS0_PHYS, start 210 arch/arm/mach-pxa/vpac270.c .start = PXA_CS0_PHYS, start 396 arch/arm/mach-pxa/vpac270.c .start = PXA_CS2_PHYS + 0x300, start 401 arch/arm/mach-pxa/vpac270.c .start = PXA_CS2_PHYS + 0x304, start 406 arch/arm/mach-pxa/vpac270.c .start = PXA_GPIO_TO_IRQ(GPIO114_VPAC270_ETH_IRQ), start 606 arch/arm/mach-pxa/vpac270.c .start = PXA_CS3_PHYS + 0x120, start 611 arch/arm/mach-pxa/vpac270.c .start = PXA_CS3_PHYS + 0x15c, start 616 arch/arm/mach-pxa/vpac270.c .start = PXA_CS3_PHYS + 0x20, start 621 arch/arm/mach-pxa/vpac270.c .start = PXA_GPIO_TO_IRQ(GPIO36_VPAC270_IDE_IRQ), start 81 arch/arm/mach-pxa/xcep.c .start = PXA_CS0_PHYS, start 103 arch/arm/mach-pxa/xcep.c .start = XCEP_ETH_PHYS, start 108 arch/arm/mach-pxa/xcep.c .start = XCEP_ETH_IRQ, start 114 arch/arm/mach-pxa/xcep.c .start = XCEP_ETH_ATTR, start 152 arch/arm/mach-pxa/z2.c .start = PXA_CS0_PHYS, start 170 arch/arm/mach-pxa/zeus.c .start = ZEUS_FLASH_PHYS, start 175 arch/arm/mach-pxa/zeus.c .start = ZEUS_SRAM_PHYS, start 204 arch/arm/mach-pxa/zeus.c .start = 0x10000000, start 209 arch/arm/mach-pxa/zeus.c .start = 0x10800000, start 214 arch/arm/mach-pxa/zeus.c .start = 0x11000000, start 219 arch/arm/mach-pxa/zeus.c .start = 0x40100000, start 224 arch/arm/mach-pxa/zeus.c .start = 0x40200000, start 229 arch/arm/mach-pxa/zeus.c .start = 0x40700000, start 318 arch/arm/mach-pxa/zeus.c .start = ZEUS_ETH0_PHYS, start 323 arch/arm/mach-pxa/zeus.c .start = ZEUS_ETH0_PHYS + 2, start 328 arch/arm/mach-pxa/zeus.c .start = PXA_GPIO_TO_IRQ(ZEUS_ETH0_GPIO), start 336 arch/arm/mach-pxa/zeus.c .start = ZEUS_ETH1_PHYS, start 341 arch/arm/mach-pxa/zeus.c .start = ZEUS_ETH1_PHYS + 2, start 346 arch/arm/mach-pxa/zeus.c .start = PXA_GPIO_TO_IRQ(ZEUS_ETH1_GPIO), start 378 arch/arm/mach-pxa/zeus.c .start = ZEUS_SRAM_PHYS, start 510 arch/arm/mach-pxa/zeus.c .start = ZEUS_CPLD_EXTWDOG_PHYS, start 49 arch/arm/mach-pxa/zylonite.c .start = ZYLONITE_ETH_PHYS + 
0x300, start 54 arch/arm/mach-pxa/zylonite.c .start = -1, /* for run-time assignment */ start 442 arch/arm/mach-pxa/zylonite.c smc91x_resources[1].start = PXA_GPIO_TO_IRQ(gpio_eth_irq); start 270 arch/arm/mach-rockchip/pm.c rk3288_bootram_phy = res.start; start 130 arch/arm/mach-rpc/ecard.c ecard_loader_reset(res->start, ec->loader); start 142 arch/arm/mach-rpc/ecard.c ec->resource[ECARD_RES_MEMC].start; start 183 arch/arm/mach-rpc/ecard.c : &ec->resource[ECARD_RES_IOCSYNC])->start; start 673 arch/arm/mach-rpc/ecard.c (ec)->resource[nr].start = st; \ start 743 arch/arm/mach-rpc/ecard.c ec->resource[i].end -= ec->resource[i].start; start 744 arch/arm/mach-rpc/ecard.c ec->resource[i].start = 0; start 775 arch/arm/mach-rpc/ecard.c ec->resource[i].start, start 861 arch/arm/mach-rpc/ecard.c unsigned long start = ecard_resource_start(ec, res); start 864 arch/arm/mach-rpc/ecard.c if (offset > (end - start)) start 867 arch/arm/mach-rpc/ecard.c start += offset; start 868 arch/arm/mach-rpc/ecard.c if (maxsize && end - start > maxsize) start 869 arch/arm/mach-rpc/ecard.c end = start + maxsize; start 871 arch/arm/mach-rpc/ecard.c return devm_ioremap(&ec->dev, start, end - start); start 51 arch/arm/mach-s3c24xx/include/mach/pm-core.h static inline void s3c_pm_show_resume_irqs(int start, unsigned long which, start 60 arch/arm/mach-s3c24xx/include/mach/pm-core.h S3C_PMDBG("IRQ %d asserted at resume\n", start+i); start 91 arch/arm/mach-s3c24xx/s3c2412.c s3c_device_sdi.resource[1].start = IRQ_S3C2412_SDI; start 98 arch/arm/mach-s3c24xx/s3c2412.c s3c_device_spi1.resource[0].start = S3C24XX_PA_SPI + S3C2412_SPI1; start 74 arch/arm/mach-s3c24xx/s3c2416.c s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT; start 50 arch/arm/mach-s3c24xx/s3c2440.c s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; start 71 arch/arm/mach-s3c24xx/s3c2443.c s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT; start 371 arch/arm/mach-s3c64xx/common.c static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end) start 378 arch/arm/mach-s3c64xx/common.c status >>= start; start 379 arch/arm/mach-s3c64xx/common.c status &= (1 << (end - start + 1)) - 1; start 381 arch/arm/mach-s3c64xx/common.c for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) { start 421 arch/arm/mach-sa1100/generic.c sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start); start 18 arch/arm/mach-sa1100/generic.h mi->bank[__nr].start = (__start), \ start 285 arch/arm/mach-sa1100/neponset.c d->base = ioremap(nep_res->start, SZ_4K); start 356 arch/arm/mach-sa1100/neponset.c sa1111_resources[1].start = d->irq_base + NEP_IRQ_SA1111; start 362 arch/arm/mach-sa1100/neponset.c smc91x_resources[2].start = d->irq_base + NEP_IRQ_SMC91X; start 46 arch/arm/mach-sa1100/pci-nanoengine.c .start = NANO_PCI_MEM_RW_PHYS, start 124 arch/arm/mach-sa1100/pci-nanoengine.c .start = 0x78000000, start 192 arch/arm/mach-shmobile/platsmp-apmu.c apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res)); start 83 arch/arm/mach-shmobile/pm-rcar-gen2.c if (res.start & (256 * 1024 - 1) || start 89 arch/arm/mach-shmobile/pm-rcar-gen2.c p = ioremap(res.start, resource_size(&res)); start 107 arch/arm/mach-shmobile/pm-rcar-gen2.c bar = phys_to_sbar(res.start); start 902 arch/arm/mach-sunxi/mc_smp.c release_mem_region(res.start, resource_size(&res)); start 906 arch/arm/mach-sunxi/mc_smp.c release_mem_region(res.start, resource_size(&res)); start 90 arch/arm/mach-ux500/cpu-db8500.c if (!r.start) { start 94 arch/arm/mach-ux500/cpu-db8500.c prcmu_early_init(r.start, 
r.end-r.start); start 95 arch/arm/mach-ux500/cpu-db8500.c ux500_pm_init(r.start, r.end-r.start); start 64 arch/arm/mach-versatile/versatile_dt.c if (adev->res.start == VERSATILE_MMCI0_BASE) start 73 arch/arm/mm/cache-feroceon-l2.c static inline void l2_clean_pa_range(unsigned long start, unsigned long end) start 82 arch/arm/mm/cache-feroceon-l2.c BUG_ON((start ^ end) >> PAGE_SHIFT); start 84 arch/arm/mm/cache-feroceon-l2.c va_start = l2_get_va(start); start 85 arch/arm/mm/cache-feroceon-l2.c va_end = va_start + (end - start); start 104 arch/arm/mm/cache-feroceon-l2.c static inline void l2_inv_pa_range(unsigned long start, unsigned long end) start 113 arch/arm/mm/cache-feroceon-l2.c BUG_ON((start ^ end) >> PAGE_SHIFT); start 115 arch/arm/mm/cache-feroceon-l2.c va_start = l2_get_va(start); start 116 arch/arm/mm/cache-feroceon-l2.c va_end = va_start + (end - start); start 142 arch/arm/mm/cache-feroceon-l2.c static unsigned long calc_range_end(unsigned long start, unsigned long end) start 146 arch/arm/mm/cache-feroceon-l2.c BUG_ON(start & (CACHE_LINE_SIZE - 1)); start 159 arch/arm/mm/cache-feroceon-l2.c if (range_end > start + MAX_RANGE_SIZE) start 160 arch/arm/mm/cache-feroceon-l2.c range_end = start + MAX_RANGE_SIZE; start 165 arch/arm/mm/cache-feroceon-l2.c if (range_end > (start | (PAGE_SIZE - 1)) + 1) start 166 arch/arm/mm/cache-feroceon-l2.c range_end = (start | (PAGE_SIZE - 1)) + 1; start 171 arch/arm/mm/cache-feroceon-l2.c static void feroceon_l2_inv_range(unsigned long start, unsigned long end) start 176 arch/arm/mm/cache-feroceon-l2.c if (start & (CACHE_LINE_SIZE - 1)) { start 177 arch/arm/mm/cache-feroceon-l2.c l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1)); start 178 arch/arm/mm/cache-feroceon-l2.c start = (start | (CACHE_LINE_SIZE - 1)) + 1; start 184 arch/arm/mm/cache-feroceon-l2.c if (start < end && end & (CACHE_LINE_SIZE - 1)) { start 192 arch/arm/mm/cache-feroceon-l2.c while (start < end) { start 193 arch/arm/mm/cache-feroceon-l2.c unsigned long range_end = calc_range_end(start, end); start 194 arch/arm/mm/cache-feroceon-l2.c l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE); start 195 arch/arm/mm/cache-feroceon-l2.c start = range_end; start 201 arch/arm/mm/cache-feroceon-l2.c static void feroceon_l2_clean_range(unsigned long start, unsigned long end) start 208 arch/arm/mm/cache-feroceon-l2.c start &= ~(CACHE_LINE_SIZE - 1); start 210 arch/arm/mm/cache-feroceon-l2.c while (start != end) { start 211 arch/arm/mm/cache-feroceon-l2.c unsigned long range_end = calc_range_end(start, end); start 212 arch/arm/mm/cache-feroceon-l2.c l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE); start 213 arch/arm/mm/cache-feroceon-l2.c start = range_end; start 220 arch/arm/mm/cache-feroceon-l2.c static void feroceon_l2_flush_range(unsigned long start, unsigned long end) start 222 arch/arm/mm/cache-feroceon-l2.c start &= ~(CACHE_LINE_SIZE - 1); start 224 arch/arm/mm/cache-feroceon-l2.c while (start != end) { start 225 arch/arm/mm/cache-feroceon-l2.c unsigned long range_end = calc_range_end(start, end); start 227 arch/arm/mm/cache-feroceon-l2.c l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE); start 228 arch/arm/mm/cache-feroceon-l2.c l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE); start 229 arch/arm/mm/cache-feroceon-l2.c start = range_end; start 521 arch/arm/mm/cache-l2x0-pmu.c .start = l2x0_pmu_event_start, start 178 arch/arm/mm/cache-l2x0.c static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, start 181 arch/arm/mm/cache-l2x0.c while (start < end) { start 182 
arch/arm/mm/cache-l2x0.c writel_relaxed(start, reg); start 183 arch/arm/mm/cache-l2x0.c start += CACHE_LINE_SIZE; start 187 arch/arm/mm/cache-l2x0.c static void l2c210_inv_range(unsigned long start, unsigned long end) start 191 arch/arm/mm/cache-l2x0.c if (start & (CACHE_LINE_SIZE - 1)) { start 192 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 193 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); start 194 arch/arm/mm/cache-l2x0.c start += CACHE_LINE_SIZE; start 202 arch/arm/mm/cache-l2x0.c __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); start 206 arch/arm/mm/cache-l2x0.c static void l2c210_clean_range(unsigned long start, unsigned long end) start 210 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 211 arch/arm/mm/cache-l2x0.c __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); start 215 arch/arm/mm/cache-l2x0.c static void l2c210_flush_range(unsigned long start, unsigned long end) start 219 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 220 arch/arm/mm/cache-l2x0.c __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); start 284 arch/arm/mm/cache-l2x0.c static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, start 289 arch/arm/mm/cache-l2x0.c while (start < end) { start 290 arch/arm/mm/cache-l2x0.c unsigned long blk_end = start + min(end - start, 4096UL); start 292 arch/arm/mm/cache-l2x0.c while (start < blk_end) { start 294 arch/arm/mm/cache-l2x0.c writel_relaxed(start, reg); start 295 arch/arm/mm/cache-l2x0.c start += CACHE_LINE_SIZE; start 307 arch/arm/mm/cache-l2x0.c static void l2c220_inv_range(unsigned long start, unsigned long end) start 313 arch/arm/mm/cache-l2x0.c if ((start | end) & (CACHE_LINE_SIZE - 1)) { start 314 arch/arm/mm/cache-l2x0.c if (start & (CACHE_LINE_SIZE - 1)) { start 315 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 316 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); start 317 arch/arm/mm/cache-l2x0.c start += CACHE_LINE_SIZE; start 328 arch/arm/mm/cache-l2x0.c start, end, flags); start 334 arch/arm/mm/cache-l2x0.c static void l2c220_clean_range(unsigned long start, unsigned long end) start 339 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 340 arch/arm/mm/cache-l2x0.c if ((end - start) >= l2x0_size) { start 347 arch/arm/mm/cache-l2x0.c start, end, flags); start 353 arch/arm/mm/cache-l2x0.c static void l2c220_flush_range(unsigned long start, unsigned long end) start 358 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 359 arch/arm/mm/cache-l2x0.c if ((end - start) >= l2x0_size) { start 366 arch/arm/mm/cache-l2x0.c start, end, flags); start 467 arch/arm/mm/cache-l2x0.c static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) start 471 arch/arm/mm/cache-l2x0.c if ((start | end) & (CACHE_LINE_SIZE - 1)) { start 478 arch/arm/mm/cache-l2x0.c if (start & (CACHE_LINE_SIZE - 1)) { start 479 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 480 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); start 481 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + L2X0_INV_LINE_PA); start 482 arch/arm/mm/cache-l2x0.c start += CACHE_LINE_SIZE; start 495 arch/arm/mm/cache-l2x0.c __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); start 499 arch/arm/mm/cache-l2x0.c static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) start 506 arch/arm/mm/cache-l2x0.c while (start < end) { start 
507 arch/arm/mm/cache-l2x0.c unsigned long blk_end = start + min(end - start, 4096UL); start 510 arch/arm/mm/cache-l2x0.c while (start < blk_end) { start 511 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); start 512 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + L2X0_INV_LINE_PA); start 513 arch/arm/mm/cache-l2x0.c start += CACHE_LINE_SIZE; start 1348 arch/arm/mm/cache-l2x0.c static unsigned long aurora_range_end(unsigned long start, unsigned long end) start 1355 arch/arm/mm/cache-l2x0.c if (end > start + AURORA_MAX_RANGE_SIZE) start 1356 arch/arm/mm/cache-l2x0.c end = start + AURORA_MAX_RANGE_SIZE; start 1361 arch/arm/mm/cache-l2x0.c if (end > PAGE_ALIGN(start+1)) start 1362 arch/arm/mm/cache-l2x0.c end = PAGE_ALIGN(start+1); start 1367 arch/arm/mm/cache-l2x0.c static void aurora_pa_range(unsigned long start, unsigned long end, start 1377 arch/arm/mm/cache-l2x0.c start &= ~(CACHE_LINE_SIZE - 1); start 1383 arch/arm/mm/cache-l2x0.c while (start < end) { start 1384 arch/arm/mm/cache-l2x0.c range_end = aurora_range_end(start, end); start 1387 arch/arm/mm/cache-l2x0.c writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG); start 1392 arch/arm/mm/cache-l2x0.c start = range_end; start 1395 arch/arm/mm/cache-l2x0.c static void aurora_inv_range(unsigned long start, unsigned long end) start 1397 arch/arm/mm/cache-l2x0.c aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG); start 1400 arch/arm/mm/cache-l2x0.c static void aurora_clean_range(unsigned long start, unsigned long end) start 1407 arch/arm/mm/cache-l2x0.c aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG); start 1410 arch/arm/mm/cache-l2x0.c static void aurora_flush_range(unsigned long start, unsigned long end) start 1413 arch/arm/mm/cache-l2x0.c aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG); start 1415 arch/arm/mm/cache-l2x0.c aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG); start 1599 arch/arm/mm/cache-l2x0.c static void bcm_inv_range(unsigned long start, unsigned long end) start 1603 arch/arm/mm/cache-l2x0.c BUG_ON(start < BCM_SYS_EMI_START_ADDR); start 1605 arch/arm/mm/cache-l2x0.c if (unlikely(end <= start)) start 1608 arch/arm/mm/cache-l2x0.c new_start = bcm_l2_phys_addr(start); start 1612 arch/arm/mm/cache-l2x0.c if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { start 1626 arch/arm/mm/cache-l2x0.c static void bcm_clean_range(unsigned long start, unsigned long end) start 1630 arch/arm/mm/cache-l2x0.c BUG_ON(start < BCM_SYS_EMI_START_ADDR); start 1632 arch/arm/mm/cache-l2x0.c if (unlikely(end <= start)) start 1635 arch/arm/mm/cache-l2x0.c new_start = bcm_l2_phys_addr(start); start 1639 arch/arm/mm/cache-l2x0.c if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { start 1653 arch/arm/mm/cache-l2x0.c static void bcm_flush_range(unsigned long start, unsigned long end) start 1657 arch/arm/mm/cache-l2x0.c BUG_ON(start < BCM_SYS_EMI_START_ADDR); start 1659 arch/arm/mm/cache-l2x0.c if (unlikely(end <= start)) start 1662 arch/arm/mm/cache-l2x0.c if ((end - start) >= l2x0_size) { start 1667 arch/arm/mm/cache-l2x0.c new_start = bcm_l2_phys_addr(start); start 1671 arch/arm/mm/cache-l2x0.c if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { start 1769 arch/arm/mm/cache-l2x0.c l2x0_base = ioremap(res.start, resource_size(&res)); start 1773 arch/arm/mm/cache-l2x0.c l2x0_saved_regs.phy_base = res.start; start 69 arch/arm/mm/cache-tauros2.c static void tauros2_inv_range(unsigned long start, unsigned long end) start 74 arch/arm/mm/cache-tauros2.c if 
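The l2c220 and l2c310-erratum entries above bound how long the controller lock is held by splitting a large range into blocks of at most 4096 bytes (the min(end - start, 4096UL) term) and dropping the lock between blocks. A sketch of that chunking arithmetic, with the lock and the per-line operation reduced to stubs and the 32-byte line size assumed:

```c
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 32UL	/* assumed line size */
#define BLOCK_SIZE	4096UL	/* the min(end - start, 4096UL) bound seen above */

static void lock_ctrl(void)   { /* kernel: raw_spin_lock_irqsave() */ }
static void unlock_ctrl(void) { /* released so other CPUs can issue ops */ }
static void op_line(uintptr_t pa) { printf("op %#lx\n", (unsigned long)pa); }

static void op_pa_range(uintptr_t start, uintptr_t end)
{
	while (start < end) {
		uintptr_t blk_end = start +
			(end - start < BLOCK_SIZE ? end - start : BLOCK_SIZE);

		lock_ctrl();
		while (start < blk_end) {
			op_line(start);
			start += CACHE_LINE_SIZE;
		}
		unlock_ctrl();	/* lock hold time is bounded per 4 KiB block */
	}
}

int main(void)
{
	op_pa_range(0x10000, 0x13000);	/* 12 KiB -> three locked blocks */
	return 0;
}
```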
(start & (CACHE_LINE_SIZE - 1)) { start 75 arch/arm/mm/cache-tauros2.c tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1)); start 76 arch/arm/mm/cache-tauros2.c start = (start | (CACHE_LINE_SIZE - 1)) + 1; start 90 arch/arm/mm/cache-tauros2.c while (start < end) { start 91 arch/arm/mm/cache-tauros2.c tauros2_inv_pa(start); start 92 arch/arm/mm/cache-tauros2.c start += CACHE_LINE_SIZE; start 98 arch/arm/mm/cache-tauros2.c static void tauros2_clean_range(unsigned long start, unsigned long end) start 100 arch/arm/mm/cache-tauros2.c start &= ~(CACHE_LINE_SIZE - 1); start 101 arch/arm/mm/cache-tauros2.c while (start < end) { start 102 arch/arm/mm/cache-tauros2.c tauros2_clean_pa(start); start 103 arch/arm/mm/cache-tauros2.c start += CACHE_LINE_SIZE; start 109 arch/arm/mm/cache-tauros2.c static void tauros2_flush_range(unsigned long start, unsigned long end) start 111 arch/arm/mm/cache-tauros2.c start &= ~(CACHE_LINE_SIZE - 1); start 112 arch/arm/mm/cache-tauros2.c while (start < end) { start 113 arch/arm/mm/cache-tauros2.c tauros2_clean_inv_pa(start); start 114 arch/arm/mm/cache-tauros2.c start += CACHE_LINE_SIZE; start 113 arch/arm/mm/cache-uniphier.c unsigned long start, start 154 arch/arm/mm/cache-uniphier.c writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD); start 178 arch/arm/mm/cache-uniphier.c unsigned long start, unsigned long end, start 187 arch/arm/mm/cache-uniphier.c start = start & ~(data->line_size - 1); start 189 arch/arm/mm/cache-uniphier.c size = end - start; start 207 arch/arm/mm/cache-uniphier.c __uniphier_cache_maint_common(data, start, chunk_size, start 210 arch/arm/mm/cache-uniphier.c start += chunk_size; start 236 arch/arm/mm/cache-uniphier.c static void uniphier_cache_maint_range(unsigned long start, unsigned long end, start 242 arch/arm/mm/cache-uniphier.c __uniphier_cache_maint_range(data, start, end, operation); start 253 arch/arm/mm/cache-uniphier.c static void uniphier_cache_inv_range(unsigned long start, unsigned long end) start 255 arch/arm/mm/cache-uniphier.c uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV); start 258 arch/arm/mm/cache-uniphier.c static void uniphier_cache_clean_range(unsigned long start, unsigned long end) start 260 arch/arm/mm/cache-uniphier.c uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN); start 263 arch/arm/mm/cache-uniphier.c static void uniphier_cache_flush_range(unsigned long start, unsigned long end) start 265 arch/arm/mm/cache-uniphier.c uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH); start 86 arch/arm/mm/cache-xsc3l2.c static void xsc3_l2_inv_range(unsigned long start, unsigned long end) start 90 arch/arm/mm/cache-xsc3l2.c if (start == 0 && end == -1ul) { start 100 arch/arm/mm/cache-xsc3l2.c if (start & (CACHE_LINE_SIZE - 1)) { start 101 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); start 104 arch/arm/mm/cache-xsc3l2.c start = (start | (CACHE_LINE_SIZE - 1)) + 1; start 110 arch/arm/mm/cache-xsc3l2.c while (start < (end & ~(CACHE_LINE_SIZE - 1))) { start 111 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr); start 113 arch/arm/mm/cache-xsc3l2.c start += CACHE_LINE_SIZE; start 119 arch/arm/mm/cache-xsc3l2.c if (start < end) { start 120 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr); start 130 arch/arm/mm/cache-xsc3l2.c static void xsc3_l2_clean_range(unsigned long start, unsigned long end) start 136 arch/arm/mm/cache-xsc3l2.c start &= ~(CACHE_LINE_SIZE - 1); start 137 arch/arm/mm/cache-xsc3l2.c while (start < end) { start 138 
arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr); start 140 arch/arm/mm/cache-xsc3l2.c start += CACHE_LINE_SIZE; start 168 arch/arm/mm/cache-xsc3l2.c static void xsc3_l2_flush_range(unsigned long start, unsigned long end) start 172 arch/arm/mm/cache-xsc3l2.c if (start == 0 && end == -1ul) { start 179 arch/arm/mm/cache-xsc3l2.c start &= ~(CACHE_LINE_SIZE - 1); start 180 arch/arm/mm/cache-xsc3l2.c while (start < end) { start 181 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr); start 184 arch/arm/mm/cache-xsc3l2.c start += CACHE_LINE_SIZE; start 429 arch/arm/mm/dma-mapping.c phys_addr_t start = dma_mmu_remap[i].base; start 430 arch/arm/mm/dma-mapping.c phys_addr_t end = start + dma_mmu_remap[i].size; start 436 arch/arm/mm/dma-mapping.c if (start >= end) start 439 arch/arm/mm/dma-mapping.c map.pfn = __phys_to_pfn(start); start 440 arch/arm/mm/dma-mapping.c map.virtual = __phys_to_virt(start); start 441 arch/arm/mm/dma-mapping.c map.length = end - start; start 453 arch/arm/mm/dma-mapping.c for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); start 457 arch/arm/mm/dma-mapping.c flush_tlb_kernel_range(__phys_to_virt(start), start 475 arch/arm/mm/dma-mapping.c unsigned long start = (unsigned long) page_address(page); start 476 arch/arm/mm/dma-mapping.c unsigned end = start + size; start 478 arch/arm/mm/dma-mapping.c apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); start 479 arch/arm/mm/dma-mapping.c flush_tlb_kernel_range(start, end); start 530 arch/arm/mm/dma-mapping.c static bool __in_atomic_pool(void *start, size_t size) start 532 arch/arm/mm/dma-mapping.c return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); start 535 arch/arm/mm/dma-mapping.c static int __free_from_pool(void *start, size_t size) start 537 arch/arm/mm/dma-mapping.c if (!__in_atomic_pool(start, size)) start 540 arch/arm/mm/dma-mapping.c gen_pool_free(atomic_pool, (unsigned long)start, size); start 1145 arch/arm/mm/dma-mapping.c unsigned int count, start; start 1159 arch/arm/mm/dma-mapping.c start = bitmap_find_next_zero_area(mapping->bitmaps[i], start 1162 arch/arm/mm/dma-mapping.c if (start > mapping->bits) start 1165 arch/arm/mm/dma-mapping.c bitmap_set(mapping->bitmaps[i], start, count); start 1180 arch/arm/mm/dma-mapping.c start = bitmap_find_next_zero_area(mapping->bitmaps[i], start 1183 arch/arm/mm/dma-mapping.c if (start > mapping->bits) { start 1188 arch/arm/mm/dma-mapping.c bitmap_set(mapping->bitmaps[i], start, count); start 1193 arch/arm/mm/dma-mapping.c iova += start << PAGE_SHIFT; start 1201 arch/arm/mm/dma-mapping.c unsigned int start, count; start 1215 arch/arm/mm/dma-mapping.c start = (addr - bitmap_base) >> PAGE_SHIFT; start 1229 arch/arm/mm/dma-mapping.c bitmap_clear(mapping->bitmaps[bitmap_index], start, count); start 1662 arch/arm/mm/dma-mapping.c struct scatterlist *s = sg, *dma = sg, *start = sg; start 1675 arch/arm/mm/dma-mapping.c if (__map_sg_chunk(dev, start, size, &dma->dma_address, start 1683 arch/arm/mm/dma-mapping.c start = s; start 1689 arch/arm/mm/dma-mapping.c if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, start 302 arch/arm/mm/dump.c static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start, start 310 arch/arm/mm/dump.c addr = start + i * PAGE_SIZE; start 334 arch/arm/mm/dump.c static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) start 342 arch/arm/mm/dump.c addr = start + i * PMD_SIZE; start 358 arch/arm/mm/dump.c static void walk_pud(struct pg_state *st, pgd_t *pgd, 
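The arm dma-mapping entries above carve I/O virtual addresses out of per-extension bitmaps: find a run of clear bits, set them, and turn the starting bit index back into an address with start << PAGE_SHIFT (freeing reverses the arithmetic and clears the bits). A toy userspace model of that bookkeeping; the naive scan below stands in for bitmap_find_next_zero_area(), and the window base is an assumed value:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define NR_BITS    64		/* toy mapping covers 64 pages */

static unsigned char map[NR_BITS];	/* one byte per bit keeps the sketch simple */

/*
 * Find 'count' consecutive free pages, mark them and return the first bit
 * index, or NR_BITS on failure (mirroring the "start > mapping->bits" check).
 */
static unsigned int alloc_iova_bits(unsigned int count)
{
	for (unsigned int start = 0; start + count <= NR_BITS; start++) {
		unsigned int i;

		for (i = 0; i < count && !map[start + i]; i++)
			;
		if (i == count) {
			memset(&map[start], 1, count);
			return start;
		}
	}
	return NR_BITS;
}

int main(void)
{
	uint64_t base = 0x80000000ULL;		/* assumed IOVA window base */
	unsigned int start = alloc_iova_bits(4);

	if (start >= NR_BITS)
		return 1;
	printf("iova = %#llx\n",
	       (unsigned long long)(base + ((uint64_t)start << PAGE_SHIFT)));
	return 0;
}
```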
unsigned long start) start 365 arch/arm/mm/dump.c addr = start + i * PUD_SIZE; start 375 arch/arm/mm/dump.c unsigned long start) start 382 arch/arm/mm/dump.c addr = start + i * PGDIR_SIZE; start 79 arch/arm/mm/flush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 82 arch/arm/mm/flush.c vivt_flush_cache_range(vma, start, end); start 56 arch/arm/mm/init.c phys_initrd_start = __virt_to_phys(tag->u.initrd.start); start 65 arch/arm/mm/init.c phys_initrd_start = tag->u.initrd.start; start 147 arch/arm/mm/init.c unsigned long start = memblock_region_memory_base_pfn(reg); start 150 arch/arm/mm/init.c if (start < max_low) { start 152 arch/arm/mm/init.c zhole_size[0] -= low_end - start; start 156 arch/arm/mm/init.c unsigned long high_start = max(start, max_low); start 210 arch/arm/mm/init.c phys_addr_t start; start 224 arch/arm/mm/init.c start = round_down(phys_initrd_start, PAGE_SIZE); start 225 arch/arm/mm/init.c size = phys_initrd_size + (phys_initrd_start - start); start 228 arch/arm/mm/init.c if (!memblock_is_region_memory(start, size)) { start 230 arch/arm/mm/init.c (u64)start, size); start 234 arch/arm/mm/init.c if (memblock_is_region_reserved(start, size)) { start 236 arch/arm/mm/init.c (u64)start, size); start 240 arch/arm/mm/init.c memblock_reserve(start, size); start 358 arch/arm/mm/init.c unsigned long start, prev_end = 0; start 366 arch/arm/mm/init.c start = memblock_region_memory_base_pfn(reg); start 373 arch/arm/mm/init.c start = min(start, start 381 arch/arm/mm/init.c start = round_down(start, MAX_ORDER_NR_PAGES); start 387 arch/arm/mm/init.c if (prev_end && prev_end < start) start 388 arch/arm/mm/init.c free_memmap(prev_end, start); start 422 arch/arm/mm/init.c unsigned long start = memblock_region_memory_base_pfn(mem); start 433 arch/arm/mm/init.c if (start < max_low) start 434 arch/arm/mm/init.c start = max_low; start 443 arch/arm/mm/init.c if (res_end < start) start 445 arch/arm/mm/init.c if (res_start < start) start 446 arch/arm/mm/init.c res_start = start; start 451 arch/arm/mm/init.c if (res_start != start) start 452 arch/arm/mm/init.c free_area_high(start, res_start); start 453 arch/arm/mm/init.c start = res_end; start 454 arch/arm/mm/init.c if (start == end) start 459 arch/arm/mm/init.c if (start < end) start 460 arch/arm/mm/init.c free_area_high(start, end); start 509 arch/arm/mm/init.c unsigned long start; start 523 arch/arm/mm/init.c .start = PAGE_OFFSET, start 531 arch/arm/mm/init.c .start = (unsigned long)__init_begin, start 539 arch/arm/mm/init.c .start = (unsigned long)__start_rodata_section_aligned, start 550 arch/arm/mm/init.c .start = (unsigned long)_stext, start 606 arch/arm/mm/init.c if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || start 609 arch/arm/mm/init.c perms[i].name, perms[i].start, perms[i].end, start 614 arch/arm/mm/init.c for (addr = perms[i].start; start 701 arch/arm/mm/init.c void free_initrd_mem(unsigned long start, unsigned long end) start 703 arch/arm/mm/init.c if (start == initrd_start) start 704 arch/arm/mm/init.c start = round_down(start, PAGE_SIZE); start 708 arch/arm/mm/init.c poison_init_mem((void *)start, PAGE_ALIGN(end) - start); start 709 arch/arm/mm/init.c free_reserved_area((void *)start, (void *)end, -1, "initrd"); start 1458 arch/arm/mm/mmu.c phys_addr_t start = reg->base; start 1459 arch/arm/mm/mmu.c phys_addr_t end = start + reg->size; start 1467 arch/arm/mm/mmu.c if (start >= end) start 1471 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(start); start 1472 arch/arm/mm/mmu.c map.virtual 
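The arch/arm/mm/init.c initrd entries above page-align the reservation by rounding the base down and padding the size by however far the base moved, so the reserved span still covers the whole initrd. A worked example of that arithmetic, assuming 4 KiB pages and a made-up load address:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t phys_initrd_start = 0x82001200ULL;	/* assumed, not page aligned */
	uint64_t phys_initrd_size  = 0x00300000ULL;

	uint64_t start = phys_initrd_start & ~(PAGE_SIZE - 1);	/* round_down */
	uint64_t size  = phys_initrd_size + (phys_initrd_start - start);

	/* Reserves [0x82001000, 0x82301200): the 0x200 bytes gained by rounding
	 * the base down are added back onto the size so nothing is cut off. */
	printf("reserve %#llx + %#llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}
```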
= __phys_to_virt(start); start 1473 arch/arm/mm/mmu.c map.length = end - start; start 1477 arch/arm/mm/mmu.c } else if (start >= kernel_x_end) { start 1478 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(start); start 1479 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(start); start 1480 arch/arm/mm/mmu.c map.length = end - start; start 1486 arch/arm/mm/mmu.c if (start < kernel_x_start) { start 1487 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(start); start 1488 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(start); start 1489 arch/arm/mm/mmu.c map.length = kernel_x_start - start; start 29 arch/arm/mm/pageattr.c static bool in_range(unsigned long start, unsigned long size, start 32 arch/arm/mm/pageattr.c return start >= range_start && start < range_end && start 33 arch/arm/mm/pageattr.c size <= range_end - start; start 39 arch/arm/mm/pageattr.c unsigned long start = addr & PAGE_MASK; start 41 arch/arm/mm/pageattr.c unsigned long size = end - start; start 45 arch/arm/mm/pageattr.c WARN_ON_ONCE(start != addr); start 50 arch/arm/mm/pageattr.c if (!in_range(start, size, MODULES_VADDR, MODULES_END) && start 51 arch/arm/mm/pageattr.c !in_range(start, size, VMALLOC_START, VMALLOC_END)) start 57 arch/arm/mm/pageattr.c ret = apply_to_page_range(&init_mm, start, size, change_page_range, start 60 arch/arm/mm/pageattr.c flush_tlb_kernel_range(start, end); start 365 arch/arm/mm/pmsa-v7.c static int __init mpu_setup_region(unsigned int number, phys_addr_t start, start 392 arch/arm/mm/pmsa-v7.c drbar_write(start); start 399 arch/arm/mm/pmsa-v7.c irbar_write(start); start 408 arch/arm/mm/pmsa-v7.c mpu_rgn_info.rgns[number].drbar = start; start 160 arch/arm/mm/pmsa-v8.c static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_addr_t end) start 167 arch/arm/mm/pmsa-v8.c bar = start; start 176 arch/arm/mm/pmsa-v8.c static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_addr_t end) start 183 arch/arm/mm/pmsa-v8.c bar = start; start 192 arch/arm/mm/pmsa-v8.c static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start,phys_addr_t end) start 199 arch/arm/mm/pmsa-v8.c bar = start; start 221 arch/arm/mm/pmsa-v8.c static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start,phys_addr_t end) start 228 arch/arm/mm/pmsa-v8.c bar = start; start 270 arch/arm/mm/pmsa-v8.c subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end); start 287 arch/arm/mm/pmsa-v8.c err |= pmsav8_setup_io(region++, io[i].start, io[i].end); start 295 arch/arm/mm/pmsa-v8.c err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end); start 111 arch/arm/plat-omap/debug-leds.c fpga = ioremap(iomem->start, resource_size(iomem)); start 4 arch/arm/plat-omap/include/plat/sram.h void omap_map_sram(unsigned long start, unsigned long size, start 97 arch/arm/plat-omap/sram.c void __init omap_map_sram(unsigned long start, unsigned long size, start 106 arch/arm/plat-omap/sram.c start = ROUND_DOWN(start, PAGE_SIZE); start 109 arch/arm/plat-omap/sram.c omap_sram_base = __arm_ioremap_exec(start, size, cached); start 61 arch/arm/plat-orion/common.c resources[0].start = mapbase; start 75 arch/arm/plat-orion/common.c resources[1].start = irq; start 227 arch/arm/plat-orion/common.c orion_rtc_resource[0].start = mapbase; start 230 arch/arm/plat-orion/common.c orion_rtc_resource[1].start = irq; start 248 arch/arm/plat-orion/common.c orion_ge_resource->start = irq; start 635 arch/arm/plat-orion/common.c orion_xor0_shared_resources[0].start = mapbase_low; start 637 arch/arm/plat-orion/common.c 
orion_xor0_shared_resources[1].start = mapbase_high; start 640 arch/arm/plat-orion/common.c orion_xor0_shared_resources[2].start = irq_0; start 642 arch/arm/plat-orion/common.c orion_xor0_shared_resources[3].start = irq_1; start 696 arch/arm/plat-orion/common.c orion_xor1_shared_resources[0].start = mapbase_low; start 698 arch/arm/plat-orion/common.c orion_xor1_shared_resources[1].start = mapbase_high; start 701 arch/arm/plat-orion/common.c orion_xor1_shared_resources[2].start = irq_0; start 703 arch/arm/plat-orion/common.c orion_xor1_shared_resources[3].start = irq_1; start 853 arch/arm/plat-orion/common.c orion_crypto_resources[2].start = srambase; start 443 arch/arm/plat-pxa/include/plat/mfp.h unsigned int start; start 448 arch/arm/plat-pxa/include/plat/mfp.h #define MFP_ADDR_X(start, end, offset) \ start 449 arch/arm/plat-pxa/include/plat/mfp.h { MFP_PIN_##start, MFP_PIN_##end, offset } start 251 arch/arm/plat-pxa/mfp.c for (p = map; p->start != MFP_PIN_INVALID; p++) { start 253 arch/arm/plat-pxa/mfp.c i = p->start; start 133 arch/arm/plat-pxa/ssp.c res = devm_request_mem_region(dev, res->start, resource_size(res), start 140 arch/arm/plat-pxa/ssp.c ssp->phys_base = res->start; start 142 arch/arm/plat-pxa/ssp.c ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res)); start 1220 arch/arm/plat-samsung/gpio-samsung.c int s3c_gpio_cfgpin_range(unsigned int start, unsigned int nr, start 1225 arch/arm/plat-samsung/gpio-samsung.c for (; nr > 0; nr--, start++) { start 1226 arch/arm/plat-samsung/gpio-samsung.c ret = s3c_gpio_cfgpin(start, cfg); start 1235 arch/arm/plat-samsung/gpio-samsung.c int s3c_gpio_cfgall_range(unsigned int start, unsigned int nr, start 1240 arch/arm/plat-samsung/gpio-samsung.c for (; nr > 0; nr--, start++) { start 1241 arch/arm/plat-samsung/gpio-samsung.c s3c_gpio_setpull(start, pull); start 1242 arch/arm/plat-samsung/gpio-samsung.c ret = s3c_gpio_cfgpin(start, cfg); start 118 arch/arm/plat-samsung/include/plat/gpio-cfg.h extern int s3c_gpio_cfgpin_range(unsigned int start, unsigned int nr, start 169 arch/arm/plat-samsung/include/plat/gpio-cfg.h extern int s3c_gpio_cfgall_range(unsigned int start, unsigned int nr, start 55 arch/arm/plat-samsung/pm-check.c (unsigned long)ptr->start, start 77 arch/arm/plat-samsung/pm-check.c (unsigned long)res->start, (unsigned long)res->end, size); start 108 arch/arm/plat-samsung/pm-check.c for (addr = res->start; addr < res->end; start 171 arch/arm/plat-samsung/pm-check.c for (addr = res->start; addr < res->end; start 44 arch/arm/vdso/vgettimeofday.c static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start) start 47 arch/arm/vdso/vgettimeofday.c return vdata->seq_count != start; start 33 arch/arm64/include/asm/alternative.h void apply_alternatives_module(void *start, size_t length); start 35 arch/arm64/include/asm/alternative.h static inline void apply_alternatives_module(void *start, size_t length) { } start 421 arch/arm64/include/asm/assembler.h .macro invalidate_icache_by_line start, end, tmp1, tmp2, label start 64 arch/arm64/include/asm/cacheflush.h extern void __flush_icache_range(unsigned long start, unsigned long end); start 65 arch/arm64/include/asm/cacheflush.h extern int invalidate_icache_range(unsigned long start, unsigned long end); start 71 arch/arm64/include/asm/cacheflush.h extern long __flush_cache_user_range(unsigned long start, unsigned long end); start 74 arch/arm64/include/asm/cacheflush.h static inline void flush_icache_range(unsigned long start, unsigned long end) start 76 
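The arm vdso entry above (vdso_read_retry returning vdata->seq_count != start) is the tail half of a sequence-counter read: sample the counter, copy the data, and retry if the counter changed or was odd while you read. A minimal reader-side model using C11 atomics rather than the kernel's seqcount helpers; the names and the spin-until-even policy are illustrative:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Even = stable, odd = writer mid-update.  A writer would bump seq to odd,
 * update payload[], then bump it back to even. */
static _Atomic uint32_t seq;
static uint64_t payload[2];

static uint32_t read_begin(void)
{
	uint32_t s;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s & 1);		/* writer active: wait for an even count */
	return s;
}

static int read_retry(uint32_t start)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != start;
}

int main(void)
{
	uint64_t a, b;
	uint32_t start;

	do {
		start = read_begin();
		a = payload[0];
		b = payload[1];
	} while (read_retry(start));	/* mirrors: vdata->seq_count != start */

	printf("%llu %llu\n", (unsigned long long)a, (unsigned long long)b);
	return 0;
}
```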
arch/arm64/include/asm/cacheflush.h __flush_icache_range(start, end); start 108 arch/arm64/include/asm/cacheflush.h unsigned long start, unsigned long end) start 169 arch/arm64/include/asm/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) start 173 arch/arm64/include/asm/cacheflush.h static inline void flush_cache_vunmap(unsigned long start, unsigned long end) start 430 arch/arm64/include/asm/kvm_host.h unsigned long start, unsigned long end); start 432 arch/arm64/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); start 33 arch/arm64/include/asm/numa.h int __init numa_add_memblk(int nodeid, u64 start, u64 end); start 41 arch/arm64/include/asm/tlb.h __flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level); start 181 arch/arm64/include/asm/tlbflush.h unsigned long start, unsigned long end, start 187 arch/arm64/include/asm/tlbflush.h start = round_down(start, stride); start 190 arch/arm64/include/asm/tlbflush.h if ((end - start) >= (MAX_TLBI_OPS * stride)) { start 198 arch/arm64/include/asm/tlbflush.h start = __TLBI_VADDR(start, asid); start 202 arch/arm64/include/asm/tlbflush.h for (addr = start; addr < end; addr += stride) { start 215 arch/arm64/include/asm/tlbflush.h unsigned long start, unsigned long end) start 221 arch/arm64/include/asm/tlbflush.h __flush_tlb_range(vma, start, end, PAGE_SIZE, false); start 224 arch/arm64/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 228 arch/arm64/include/asm/tlbflush.h if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) { start 233 arch/arm64/include/asm/tlbflush.h start = __TLBI_VADDR(start, 0); start 237 arch/arm64/include/asm/tlbflush.h for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) start 129 arch/arm64/kernel/alternative.c static void clean_dcache_range_nopatch(u64 start, u64 end) start 136 arch/arm64/kernel/alternative.c cur = start & ~(d_size - 1); start 263 arch/arm64/kernel/alternative.c void apply_alternatives_module(void *start, size_t length) start 266 arch/arm64/kernel/alternative.c .begin = start, start 267 arch/arm64/kernel/alternative.c .end = start + length, start 201 arch/arm64/kernel/cpuinfo.c .start = c_start, start 281 arch/arm64/kernel/hibernate.c #define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start)) start 369 arch/arm64/kernel/hibernate.c static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start, start 374 arch/arm64/kernel/hibernate.c unsigned long addr = start; start 380 arch/arm64/kernel/hibernate.c dst_ptep = pte_offset_kernel(dst_pmdp, start); start 382 arch/arm64/kernel/hibernate.c src_ptep = pte_offset_kernel(src_pmdp, start); start 390 arch/arm64/kernel/hibernate.c static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start, start 396 arch/arm64/kernel/hibernate.c unsigned long addr = start; start 404 arch/arm64/kernel/hibernate.c dst_pmdp = pmd_offset(dst_pudp, start); start 406 arch/arm64/kernel/hibernate.c src_pmdp = pmd_offset(src_pudp, start); start 425 arch/arm64/kernel/hibernate.c static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, start 431 arch/arm64/kernel/hibernate.c unsigned long addr = start; start 439 arch/arm64/kernel/hibernate.c dst_pudp = pud_offset(dst_pgdp, start); start 441 arch/arm64/kernel/hibernate.c src_pudp = pud_offset(src_pgdp, start); start 460 arch/arm64/kernel/hibernate.c static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start, start 464 
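The arm64 tlbflush entries above cap the cost of range invalidation: the start is rounded down to the stride and, once the range would need more than MAX_TLBI_OPS individual operations, the code falls back to flushing everything. A sketch of just that decision; the threshold value and the two flush stubs are assumptions for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define MAX_TLBI_OPS 512UL	/* assumed threshold for the sketch */

static void flush_all(void)           { puts("flush entire TLB"); }
static void flush_one(uintptr_t addr) { printf("tlbi %#lx\n", (unsigned long)addr); }

static void flush_range(uintptr_t start, uintptr_t end, unsigned long stride)
{
	start &= ~((uintptr_t)stride - 1);	/* round_down(start, stride) */

	if ((end - start) >= MAX_TLBI_OPS * stride) {
		flush_all();			/* cheaper than hundreds of TLBIs */
		return;
	}
	for (uintptr_t addr = start; addr < end; addr += stride)
		flush_one(addr);		/* one op per stride-sized block */
}

int main(void)
{
	flush_range(0x40000123UL, 0x40004000UL, PAGE_SIZE);	/* small: per page */
	flush_range(0x40000000UL, 0x48000000UL, PAGE_SIZE);	/* huge: flush all */
	return 0;
}
```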
arch/arm64/kernel/hibernate.c unsigned long addr = start; start 465 arch/arm64/kernel/hibernate.c pgd_t *src_pgdp = pgd_offset_k(start); start 467 arch/arm64/kernel/hibernate.c dst_pgdp = pgd_offset_raw(dst_pgdp, start); start 102 arch/arm64/kernel/kexec_image.c image->start = kernel_segment->mem; start 42 arch/arm64/kernel/machine_kexec.c pr_debug(" start: %lx\n", kimage->start); start 218 arch/arm64/kernel/machine_kexec.c cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, start 339 arch/arm64/kernel/machine_kexec.c if ((addr < crashk_res.start) || (crashk_res.end < addr)) start 1028 arch/arm64/kernel/perf_event.c cpu_pmu->start = armv8pmu_start; start 85 arch/arm64/kernel/psci.c unsigned long start, end; start 95 arch/arm64/kernel/psci.c start = jiffies; start 96 arch/arm64/kernel/psci.c end = start + msecs_to_jiffies(100); start 101 arch/arm64/kernel/psci.c jiffies_to_msecs(jiffies - start)); start 804 arch/arm64/kernel/ptrace.c unsigned long start, end; start 831 arch/arm64/kernel/ptrace.c start = SVE_PT_SVE_OFFSET; start 835 arch/arm64/kernel/ptrace.c start, end); start 839 arch/arm64/kernel/ptrace.c start = end; start 842 arch/arm64/kernel/ptrace.c start, end); start 850 arch/arm64/kernel/ptrace.c start = end; start 854 arch/arm64/kernel/ptrace.c start, end); start 858 arch/arm64/kernel/ptrace.c start = end; start 861 arch/arm64/kernel/ptrace.c start, end); start 872 arch/arm64/kernel/ptrace.c unsigned long start, end; start 930 arch/arm64/kernel/ptrace.c start = SVE_PT_SVE_OFFSET; start 934 arch/arm64/kernel/ptrace.c start, end); start 938 arch/arm64/kernel/ptrace.c start = end; start 941 arch/arm64/kernel/ptrace.c start, end); start 949 arch/arm64/kernel/ptrace.c start = end; start 953 arch/arm64/kernel/ptrace.c start, end); start 1247 arch/arm64/kernel/ptrace.c unsigned int i, start, num_regs; start 1253 arch/arm64/kernel/ptrace.c start = pos / regset->size; start 1255 arch/arm64/kernel/ptrace.c if (start + num_regs > regset->n) start 1259 arch/arm64/kernel/ptrace.c unsigned int idx = start + i; start 1301 arch/arm64/kernel/ptrace.c unsigned int i, start, num_regs; start 1307 arch/arm64/kernel/ptrace.c start = pos / regset->size; start 1309 arch/arm64/kernel/ptrace.c if (start + num_regs > regset->n) start 1315 arch/arm64/kernel/ptrace.c unsigned int idx = start + i; start 65 arch/arm64/kernel/setup.c .start = 0, start 71 arch/arm64/kernel/setup.c .start = 0, start 209 arch/arm64/kernel/setup.c kernel_code.start = __pa_symbol(_text); start 211 arch/arm64/kernel/setup.c kernel_data.start = __pa_symbol(_sdata); start 229 arch/arm64/kernel/setup.c res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); start 234 arch/arm64/kernel/setup.c if (kernel_code.start >= res->start && start 237 arch/arm64/kernel/setup.c if (kernel_data.start >= res->start && start 242 arch/arm64/kernel/setup.c if (crashk_res.end && crashk_res.start >= res->start && start 257 arch/arm64/kernel/setup.c if (!memblock_is_region_reserved(mem->start, mem_size)) start 261 arch/arm64/kernel/setup.c resource_size_t start, end; start 263 arch/arm64/kernel/setup.c start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); start 266 arch/arm64/kernel/setup.c if (start > mem->end || end < mem->start) start 269 arch/arm64/kernel/setup.c reserve_region_with_split(mem, start, end, "reserved"); start 35 arch/arm64/kernel/smp_spin_table.c void *start = (void *)&secondary_holding_pen_release; start 39 arch/arm64/kernel/smp_spin_table.c __flush_dcache_area(start, size); start 25 
arch/arm64/kernel/sys_compat.c __do_compat_cache_op(unsigned long start, unsigned long end) start 30 arch/arm64/kernel/sys_compat.c unsigned long chunk = min(PAGE_SIZE, end - start); start 44 arch/arm64/kernel/sys_compat.c ret = __flush_cache_user_range(start, start + chunk); start 49 arch/arm64/kernel/sys_compat.c start += chunk; start 50 arch/arm64/kernel/sys_compat.c } while (start < end); start 56 arch/arm64/kernel/sys_compat.c do_compat_cache_op(unsigned long start, unsigned long end, int flags) start 58 arch/arm64/kernel/sys_compat.c if (end < start || flags) start 61 arch/arm64/kernel/sys_compat.c if (!access_ok((const void __user *)start, end - start)) start 64 arch/arm64/kernel/sys_compat.c return __do_compat_cache_op(start, end); start 28 arch/arm64/lib/delay.c cycles_t start = get_cycles(); start 34 arch/arm64/lib/delay.c while ((get_cycles() - start + timer_evt_period) < cycles) start 38 arch/arm64/lib/delay.c while ((get_cycles() - start) < cycles) start 295 arch/arm64/mm/dump.c static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, start 298 arch/arm64/mm/dump.c unsigned long addr = start; start 299 arch/arm64/mm/dump.c pte_t *ptep = pte_offset_kernel(pmdp, start); start 306 arch/arm64/mm/dump.c static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, start 309 arch/arm64/mm/dump.c unsigned long next, addr = start; start 310 arch/arm64/mm/dump.c pmd_t *pmdp = pmd_offset(pudp, start); start 325 arch/arm64/mm/dump.c static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, start 328 arch/arm64/mm/dump.c unsigned long next, addr = start; start 329 arch/arm64/mm/dump.c pud_t *pudp = pud_offset(pgdp, start); start 345 arch/arm64/mm/dump.c unsigned long start) start 347 arch/arm64/mm/dump.c unsigned long end = (start < TASK_SIZE_64) ? 
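The arm64 sys_compat entries above walk a user-supplied range at most one page per step (chunk = min(PAGE_SIZE, end - start)) after rejecting end < start, so a huge or bogus range cannot pin the kernel in one long operation. A sketch of that loop with the per-chunk work and the abort check as stubs (should_abort stands in for fatal_signal_pending):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static int should_abort(void)	/* stands in for fatal_signal_pending() */
{
	return 0;
}

static int do_chunk(uintptr_t s, uintptr_t e)
{
	printf("op %#lx-%#lx\n", (unsigned long)s, (unsigned long)e);
	return 0;
}

static int op_user_range(uintptr_t start, uintptr_t end)
{
	if (end < start)
		return -1;			/* mirrors the end < start rejection */
	do {
		unsigned long chunk = end - start;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* min(PAGE_SIZE, end - start) */
		if (should_abort())
			return -1;
		if (do_chunk(start, start + chunk))
			return -1;
		start += chunk;
	} while (start < end);
	return 0;
}

int main(void)
{
	return op_user_range(0x10000100, 0x10002300);
}
```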
TASK_SIZE_64 : 0; start 348 arch/arm64/mm/dump.c unsigned long next, addr = start; start 349 arch/arm64/mm/dump.c pgd_t *pgdp = pgd_offset(mm, start); start 113 arch/arm64/mm/init.c crashk_res.start = crash_base; start 217 arch/arm64/mm/init.c unsigned long start = memblock_region_memory_base_pfn(reg); start 220 arch/arm64/mm/init.c if (start >= max) start 224 arch/arm64/mm/init.c if (start < max_dma) { start 226 arch/arm64/mm/init.c zhole_size[ZONE_DMA32] -= dma_end - start; start 231 arch/arm64/mm/init.c unsigned long normal_start = max(start, max_dma); start 493 arch/arm64/mm/init.c unsigned long start, prev_end = 0; start 497 arch/arm64/mm/init.c start = __phys_to_pfn(reg->base); start 504 arch/arm64/mm/init.c start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); start 510 arch/arm64/mm/init.c if (prev_end && prev_end < start) start 511 arch/arm64/mm/init.c free_memmap(prev_end, start); start 584 arch/arm64/mm/init.c void __init free_initrd_mem(unsigned long start, unsigned long end) start 588 arch/arm64/mm/init.c aligned_start = __virt_to_phys(start) & PAGE_MASK; start 591 arch/arm64/mm/init.c free_reserved_area((void *)start, (void *)end, 0, "initrd"); start 167 arch/arm64/mm/kasan_init.c static void __init kasan_map_populate(unsigned long start, unsigned long end, start 170 arch/arm64/mm/kasan_init.c kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false); start 188 arch/arm64/mm/kasan_init.c static void __init clear_pgds(unsigned long start, start 196 arch/arm64/mm/kasan_init.c for (; start < end; start += PGDIR_SIZE) start 197 arch/arm64/mm/kasan_init.c set_pgd(pgd_offset_k(start), __pgd(0)); start 239 arch/arm64/mm/kasan_init.c void *start = (void *)__phys_to_virt(reg->base); start 242 arch/arm64/mm/kasan_init.c if (start >= end) start 245 arch/arm64/mm/kasan_init.c kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), start 247 arch/arm64/mm/kasan_init.c early_pfn_to_nid(virt_to_pfn(start))); start 442 arch/arm64/mm/mmu.c static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start, start 445 arch/arm64/mm/mmu.c __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, start 478 arch/arm64/mm/mmu.c memblock_mark_nomap(crashk_res.start, start 484 arch/arm64/mm/mmu.c phys_addr_t start = reg->base; start 485 arch/arm64/mm/mmu.c phys_addr_t end = start + reg->size; start 487 arch/arm64/mm/mmu.c if (start >= end) start 492 arch/arm64/mm/mmu.c __map_memblock(pgdp, start, end, PAGE_KERNEL, flags); start 516 arch/arm64/mm/mmu.c __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1, start 519 arch/arm64/mm/mmu.c memblock_clear_nomap(crashk_res.start, start 730 arch/arm64/mm/mmu.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 733 arch/arm64/mm/mmu.c return vmemmap_populate_basepages(start, end, node); start 736 arch/arm64/mm/mmu.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 739 arch/arm64/mm/mmu.c unsigned long addr = start; start 772 arch/arm64/mm/mmu.c void vmemmap_free(unsigned long start, unsigned long end, start 1053 arch/arm64/mm/mmu.c int arch_add_memory(int nid, u64 start, u64 size, start 1061 arch/arm64/mm/mmu.c __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), start 1064 arch/arm64/mm/mmu.c return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, start 1067 arch/arm64/mm/mmu.c void arch_remove_memory(int nid, u64 start, u64 size, start 1070 arch/arm64/mm/mmu.c unsigned long start_pfn = start >> PAGE_SHIFT; start 200 
arch/arm64/mm/numa.c int __init numa_add_memblk(int nid, u64 start, u64 end) start 204 arch/arm64/mm/numa.c ret = memblock_set_node(start, (end - start), &memblock.memory, nid); start 207 arch/arm64/mm/numa.c start, (end - 1), nid); start 37 arch/arm64/mm/pageattr.c static int __change_memory_common(unsigned long start, unsigned long size, start 46 arch/arm64/mm/pageattr.c ret = apply_to_page_range(&init_mm, start, size, change_page_range, start 49 arch/arm64/mm/pageattr.c flush_tlb_kernel_range(start, start + size); start 56 arch/arm64/mm/pageattr.c unsigned long start = addr; start 58 arch/arm64/mm/pageattr.c unsigned long end = start + size; start 63 arch/arm64/mm/pageattr.c start &= PAGE_MASK; start 64 arch/arm64/mm/pageattr.c end = start + size; start 108 arch/arm64/mm/pageattr.c return __change_memory_common(start, size, set_mask, clear_mask); start 817 arch/arm64/net/bpf_jit_comp.c static inline void bpf_flush_icache(void *start, void *end) start 819 arch/arm64/net/bpf_jit_comp.c flush_icache_range((unsigned long)start, (unsigned long)end); start 63 arch/c6x/include/asm/cache.h extern void enable_caching(unsigned long start, unsigned long end); start 64 arch/c6x/include/asm/cache.h extern void disable_caching(unsigned long start, unsigned long end); start 77 arch/c6x/include/asm/cache.h extern void L1P_cache_block_invalidate(unsigned int start, unsigned int end); start 78 arch/c6x/include/asm/cache.h extern void L1D_cache_block_invalidate(unsigned int start, unsigned int end); start 79 arch/c6x/include/asm/cache.h extern void L1D_cache_block_writeback_invalidate(unsigned int start, start 81 arch/c6x/include/asm/cache.h extern void L1D_cache_block_writeback(unsigned int start, unsigned int end); start 82 arch/c6x/include/asm/cache.h extern void L2_cache_block_invalidate(unsigned int start, unsigned int end); start 83 arch/c6x/include/asm/cache.h extern void L2_cache_block_writeback(unsigned int start, unsigned int end); start 84 arch/c6x/include/asm/cache.h extern void L2_cache_block_writeback_invalidate(unsigned int start, start 86 arch/c6x/include/asm/cache.h extern void L2_cache_block_invalidate_nowait(unsigned int start, start 88 arch/c6x/include/asm/cache.h extern void L2_cache_block_writeback_nowait(unsigned int start, start 91 arch/c6x/include/asm/cache.h extern void L2_cache_block_writeback_invalidate_nowait(unsigned int start, start 25 arch/c6x/include/asm/cacheflush.h #define flush_cache_range(mm, start, end) do {} while (0) start 27 arch/c6x/include/asm/cacheflush.h #define flush_cache_vmap(start, end) do {} while (0) start 28 arch/c6x/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) do {} while (0) start 15 arch/c6x/include/asm/setup.h extern int c6x_add_memory(phys_addr_t start, unsigned long size); start 28 arch/c6x/include/asm/setup.h extern void coherent_mem_init(u32 start, u32 size); start 239 arch/c6x/kernel/setup.c int __init c6x_add_memory(phys_addr_t start, unsigned long size) start 247 arch/c6x/kernel/setup.c if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size)) start 250 arch/c6x/kernel/setup.c ram_start = start; start 251 arch/c6x/kernel/setup.c ram_end = start + size; start 118 arch/c6x/mm/dma-coherent.c void __init coherent_mem_init(phys_addr_t start, u32 size) start 125 arch/c6x/mm/dma-coherent.c start, size); start 127 arch/c6x/mm/dma-coherent.c dma_base = start; start 129 arch/c6x/platforms/cache.c static void cache_block_operation(unsigned int *start, start 137 arch/c6x/platforms/cache.c - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 
2; start 140 arch/c6x/platforms/cache.c for (; wcnt; wcnt -= wc, start += wc) { start 157 arch/c6x/platforms/cache.c imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start)); start 174 arch/c6x/platforms/cache.c static void cache_block_operation_nowait(unsigned int *start, start 182 arch/c6x/platforms/cache.c - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2; start 185 arch/c6x/platforms/cache.c for (; wcnt; wcnt -= wc, start += wc) { start 189 arch/c6x/platforms/cache.c imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start)); start 322 arch/c6x/platforms/cache.c void enable_caching(unsigned long start, unsigned long end) start 324 arch/c6x/platforms/cache.c unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2); start 331 arch/c6x/platforms/cache.c void disable_caching(unsigned long start, unsigned long end) start 333 arch/c6x/platforms/cache.c unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2); start 344 arch/c6x/platforms/cache.c void L1P_cache_block_invalidate(unsigned int start, unsigned int end) start 346 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 352 arch/c6x/platforms/cache.c void L1D_cache_block_invalidate(unsigned int start, unsigned int end) start 354 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 359 arch/c6x/platforms/cache.c void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end) start 361 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 366 arch/c6x/platforms/cache.c void L1D_cache_block_writeback(unsigned int start, unsigned int end) start 368 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 377 arch/c6x/platforms/cache.c void L2_cache_block_invalidate(unsigned int start, unsigned int end) start 379 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 384 arch/c6x/platforms/cache.c void L2_cache_block_writeback(unsigned int start, unsigned int end) start 386 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 391 arch/c6x/platforms/cache.c void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end) start 393 arch/c6x/platforms/cache.c cache_block_operation((unsigned int *) start, start 398 arch/c6x/platforms/cache.c void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end) start 400 arch/c6x/platforms/cache.c cache_block_operation_nowait((unsigned int *) start, start 405 arch/c6x/platforms/cache.c void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end) start 407 arch/c6x/platforms/cache.c cache_block_operation_nowait((unsigned int *) start, start 412 arch/c6x/platforms/cache.c void L2_cache_block_writeback_invalidate_nowait(unsigned int start, start 415 arch/c6x/platforms/cache.c cache_block_operation_nowait((unsigned int *) start, start 69 arch/csky/abiv1/cacheflush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start, start 45 arch/csky/abiv1/inc/abi/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); start 46 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_vmap(start, end) cache_wbinv_all() start 47 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_vunmap(start, end) cache_wbinv_all() start 50 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_icache_range(start, end) cache_wbinv_range(start, end) start 11 arch/csky/abiv2/cacheflush.c unsigned long start; start 13 arch/csky/abiv2/cacheflush.c start = 
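The c6x enable_caching/disable_caching entries above select a memory-attribute register purely by address arithmetic: each MAR governs a 16 MiB window, so the register index is start >> 24, and registers are four bytes apart, hence the << 2. A worked check of that formula; the register base used here is only an assumed constant for the example:

```c
#include <stdint.h>
#include <stdio.h>

#define IMCR_MAR_BASE 0x01848000U	/* assumed register base for the example */

/* One MAR per 16 MiB of address space, registers 4 bytes apart. */
static uint32_t mar_for(uint32_t start)
{
	return IMCR_MAR_BASE + ((start >> 24) << 2);
}

int main(void)
{
	/* 0xC2000000 lies in the 0xC2xxxxxx window, i.e. region index 0xC2,
	 * so its MAR sits 0xC2 * 4 = 0x308 bytes past the base. */
	printf("%#x\n", mar_for(0xC2000000U));
	return 0;
}
```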
(unsigned long) kmap_atomic(page); start 15 arch/csky/abiv2/cacheflush.c cache_wbinv_range(start, start + PAGE_SIZE); start 17 arch/csky/abiv2/cacheflush.c kunmap_atomic((void *)start); start 17 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_range(vma, start, end) \ start 29 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_icache_range(start, end) cache_wbinv_range(start, end) start 35 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_vmap(start, end) do { } while (0) start 36 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_vunmap(start, end) do { } while (0) start 15 arch/csky/include/asm/cache.h void dcache_wb_line(unsigned long start); start 17 arch/csky/include/asm/cache.h void icache_inv_range(unsigned long start, unsigned long end); start 20 arch/csky/include/asm/cache.h void dcache_wb_range(unsigned long start, unsigned long end); start 23 arch/csky/include/asm/cache.h void cache_wbinv_range(unsigned long start, unsigned long end); start 26 arch/csky/include/asm/cache.h void dma_wbinv_range(unsigned long start, unsigned long end); start 27 arch/csky/include/asm/cache.h void dma_inv_range(unsigned long start, unsigned long end); start 28 arch/csky/include/asm/cache.h void dma_wb_range(unsigned long start, unsigned long end); start 19 arch/csky/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 21 arch/csky/include/asm/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 75 arch/csky/kernel/cpu-probe.c .start = c_start, start 1213 arch/csky/kernel/perf_event.c .start = csky_pmu_start, start 44 arch/csky/mm/cachev1.c unsigned int start, start 53 arch/csky/mm/cachev1.c if (unlikely((end - start) >= PAGE_SIZE) || start 54 arch/csky/mm/cachev1.c unlikely(start < PAGE_OFFSET) || start 55 arch/csky/mm/cachev1.c unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) { start 67 arch/csky/mm/cachev1.c i = start & ~(L1_CACHE_BYTES - 1); start 80 arch/csky/mm/cachev1.c void dcache_wb_line(unsigned long start) start 83 arch/csky/mm/cachev1.c cache_op_line(start, DATA_CACHE|CACHE_CLR); start 87 arch/csky/mm/cachev1.c void icache_inv_range(unsigned long start, unsigned long end) start 89 arch/csky/mm/cachev1.c cache_op_range(start, end, INS_CACHE|CACHE_INV, 0); start 97 arch/csky/mm/cachev1.c void dcache_wb_range(unsigned long start, unsigned long end) start 99 arch/csky/mm/cachev1.c cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); start 107 arch/csky/mm/cachev1.c void cache_wbinv_range(unsigned long start, unsigned long end) start 109 arch/csky/mm/cachev1.c cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); start 118 arch/csky/mm/cachev1.c void dma_wbinv_range(unsigned long start, unsigned long end) start 120 arch/csky/mm/cachev1.c cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); start 123 arch/csky/mm/cachev1.c void dma_inv_range(unsigned long start, unsigned long end) start 125 arch/csky/mm/cachev1.c cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); start 128 arch/csky/mm/cachev1.c void dma_wb_range(unsigned long start, unsigned long end) start 130 arch/csky/mm/cachev1.c cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); start 9 arch/csky/mm/cachev2.c inline void dcache_wb_line(unsigned long start) start 11 arch/csky/mm/cachev2.c asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); start 15 arch/csky/mm/cachev2.c void icache_inv_range(unsigned long start, unsigned long end) start 17 arch/csky/mm/cachev2.c unsigned 
long i = start & ~(L1_CACHE_BYTES - 1); start 30 arch/csky/mm/cachev2.c void dcache_wb_range(unsigned long start, unsigned long end) start 32 arch/csky/mm/cachev2.c unsigned long i = start & ~(L1_CACHE_BYTES - 1); start 39 arch/csky/mm/cachev2.c void dcache_inv_range(unsigned long start, unsigned long end) start 41 arch/csky/mm/cachev2.c unsigned long i = start & ~(L1_CACHE_BYTES - 1); start 48 arch/csky/mm/cachev2.c void cache_wbinv_range(unsigned long start, unsigned long end) start 50 arch/csky/mm/cachev2.c unsigned long i = start & ~(L1_CACHE_BYTES - 1); start 56 arch/csky/mm/cachev2.c i = start & ~(L1_CACHE_BYTES - 1); start 63 arch/csky/mm/cachev2.c void dma_wbinv_range(unsigned long start, unsigned long end) start 65 arch/csky/mm/cachev2.c unsigned long i = start & ~(L1_CACHE_BYTES - 1); start 72 arch/csky/mm/cachev2.c void dma_inv_range(unsigned long start, unsigned long end) start 74 arch/csky/mm/cachev2.c unsigned long i = start & ~(L1_CACHE_BYTES - 1); start 81 arch/csky/mm/cachev2.c void dma_wb_range(unsigned long start, unsigned long end) start 83 arch/csky/mm/cachev2.c unsigned long i = start & ~(L1_CACHE_BYTES - 1); start 18 arch/csky/mm/dma-mapping.c void (*fn)(unsigned long start, unsigned long end)) start 21 arch/csky/mm/dma-mapping.c void *start = __va(page_to_phys(page)); start 32 arch/csky/mm/dma-mapping.c start = kmap_atomic(page); start 34 arch/csky/mm/dma-mapping.c fn((unsigned long)start + offset, start 35 arch/csky/mm/dma-mapping.c (unsigned long)start + offset + len); start 37 arch/csky/mm/dma-mapping.c kunmap_atomic(start); start 39 arch/csky/mm/dma-mapping.c fn((unsigned long)start + offset, start 40 arch/csky/mm/dma-mapping.c (unsigned long)start + offset + len); start 45 arch/csky/mm/dma-mapping.c start += PAGE_SIZE; start 50 arch/csky/mm/dma-mapping.c static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end) start 52 arch/csky/mm/dma-mapping.c memset((void *)start, 0, end - start); start 53 arch/csky/mm/dma-mapping.c dma_wbinv_range(start, end); start 120 arch/csky/mm/highmem.c static void __init fixrange_init(unsigned long start, unsigned long end, start 131 arch/csky/mm/highmem.c vaddr = start; start 47 arch/csky/mm/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 52 arch/csky/mm/tlb.c start &= TLB_ENTRY_SIZE_MASK; start 57 arch/csky/mm/tlb.c while (start < end) { start 58 arch/csky/mm/tlb.c asm volatile("tlbi.vas %0"::"r"(start | newpid)); start 59 arch/csky/mm/tlb.c start += 2*PAGE_SIZE; start 68 arch/csky/mm/tlb.c while (start < end) { start 71 arch/csky/mm/tlb.c write_mmu_entryhi(start | newpid); start 72 arch/csky/mm/tlb.c start += 2*PAGE_SIZE; start 84 arch/csky/mm/tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 86 arch/csky/mm/tlb.c start &= TLB_ENTRY_SIZE_MASK; start 91 arch/csky/mm/tlb.c while (start < end) { start 92 arch/csky/mm/tlb.c asm volatile("tlbi.vaas %0"::"r"(start)); start 93 arch/csky/mm/tlb.c start += 2*PAGE_SIZE; start 102 arch/csky/mm/tlb.c while (start < end) { start 105 arch/csky/mm/tlb.c write_mmu_entryhi(start | oldpid); start 106 arch/csky/mm/tlb.c start += 2*PAGE_SIZE; start 164 arch/h8300/kernel/setup.c .start = c_start, start 31 arch/hexagon/include/asm/cacheflush.h #define flush_cache_range(vma, start, end) do { } while (0) start 39 arch/hexagon/include/asm/cacheflush.h #define flush_cache_vmap(start, end) do { } while (0) start 40 arch/hexagon/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) do { } while (0) start 45 
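The csky dma-mapping entries above run a cache callback over a buffer one page at a time, mapping each highmem page temporarily before operating on it, and remembering that only the first page can start at a non-zero offset. A simplified userspace model of that walk, with map_page/unmap_page and cache_op as stand-ins for kmap_atomic/kunmap_atomic and the dcache routine:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-ins for kmap_atomic()/kunmap_atomic() and the dcache routine. */
static uintptr_t map_page(unsigned long pfn) { return (uintptr_t)pfn * PAGE_SIZE; }
static void unmap_page(uintptr_t va)         { (void)va; }
static void cache_op(uintptr_t s, uintptr_t e)
{
	printf("op %#lx-%#lx\n", (unsigned long)s, (unsigned long)e);
}

/* Apply cache_op to 'size' bytes starting 'offset' into page 'pfn'. */
static void sync_range(unsigned long pfn, size_t offset, size_t size)
{
	while (size) {
		size_t len = PAGE_SIZE - offset;

		if (len > size)
			len = size;

		uintptr_t va = map_page(pfn);	/* highmem: mapped only briefly */
		cache_op(va + offset, va + offset + len);
		unmap_page(va);

		offset = 0;	/* only the first page may start mid-page */
		pfn++;
		size -= len;
	}
}

int main(void)
{
	sync_range(0x80000, 0x100, 0x2100);
	return 0;
}
```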
arch/hexagon/include/asm/cacheflush.h extern void flush_dcache_range(unsigned long start, unsigned long end); start 50 arch/hexagon/include/asm/cacheflush.h extern void flush_icache_range(unsigned long start, unsigned long end); start 85 arch/hexagon/include/asm/cacheflush.h extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end); start 86 arch/hexagon/include/asm/cacheflush.h extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end); start 27 arch/hexagon/include/asm/io.h extern int remap_area_pages(unsigned long start, unsigned long phys_addr, start 28 arch/hexagon/include/asm/tlbflush.h unsigned long start, unsigned long end); start 29 arch/hexagon/include/asm/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 43 arch/hexagon/include/asm/tlbflush.h #define flush_tlb_pgtables(mm, start, end) start 133 arch/hexagon/kernel/setup.c .start = &c_start, start 38 arch/hexagon/kernel/time.c .start = RTOS_TIMER_REGS_ADDR, start 173 arch/hexagon/kernel/time.c rtos_timer = ioremap(resource->start, resource_size(resource)); start 176 arch/hexagon/kernel/time.c release_mem_region(resource->start, resource_size(resource)); start 208 arch/hexagon/kernel/time.c unsigned long long start = __vmgettime(); start 210 arch/hexagon/kernel/time.c while ((__vmgettime() - start) < cycles) start 223 arch/hexagon/kernel/time.c unsigned long long start = __vmgettime(); start 226 arch/hexagon/kernel/time.c while ((__vmgettime() - start) < finish) start 88 arch/hexagon/lib/checksum.c int i, start, mid, end, mask; start 96 arch/hexagon/lib/checksum.c start = 0xF & (16-(((int) ptr) & 0xF)) ; start 98 arch/hexagon/lib/checksum.c start = start & mask ; start 100 arch/hexagon/lib/checksum.c mid = len - start; start 106 arch/hexagon/lib/checksum.c if (start & 1) start 108 arch/hexagon/lib/checksum.c ptr2 = (unsigned short *) &ptr[start & 1]; start 109 arch/hexagon/lib/checksum.c if (start & 2) start 111 arch/hexagon/lib/checksum.c ptr4 = (unsigned int *) &ptr[start & 3]; start 112 arch/hexagon/lib/checksum.c if (start & 4) { start 118 arch/hexagon/lib/checksum.c ptr8 = (u64 *) &ptr[start & 7]; start 119 arch/hexagon/lib/checksum.c if (start & 8) { start 125 arch/hexagon/lib/checksum.c ptr8_o = (u64 *) (ptr + start); start 126 arch/hexagon/lib/checksum.c ptr8_e = (u64 *) (ptr + start + 8); start 148 arch/hexagon/lib/checksum.c ptr4 = (unsigned int *) &ptr[start + (mid * 16) + (end & 8)]; start 155 arch/hexagon/lib/checksum.c ptr2 = (unsigned short *) &ptr[start + (mid * 16) + (end & 12)]; start 160 arch/hexagon/lib/checksum.c sum1 += (u64) ptr[start + (mid * 16) + (end & 14)]; start 162 arch/hexagon/lib/checksum.c ptr8 = (u64 *) &ptr[start + (mid * 16)]; start 174 arch/hexagon/lib/checksum.c if (start & 1) start 12 arch/hexagon/mm/cache.c #define spanlines(start, end) \ start 13 arch/hexagon/mm/cache.c (((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1) start 15 arch/hexagon/mm/cache.c void flush_dcache_range(unsigned long start, unsigned long end) start 17 arch/hexagon/mm/cache.c unsigned long lines = spanlines(start, end-1); start 20 arch/hexagon/mm/cache.c start &= ~(LINESIZE - 1); start 28 arch/hexagon/mm/cache.c : "r" (start) start 30 arch/hexagon/mm/cache.c start += LINESIZE; start 35 arch/hexagon/mm/cache.c void flush_icache_range(unsigned long start, unsigned long end) start 37 arch/hexagon/mm/cache.c unsigned long lines = spanlines(start, end-1); start 40 arch/hexagon/mm/cache.c start &= ~(LINESIZE - 1); start 49 
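The hexagon cache.c entries above size their flush loops with spanlines(start, end), which counts how many cache lines the inclusive byte range touches by measuring from the line-aligned start (callers pass end - 1). A worked check of the formula, assuming 32-byte lines (LINESIZE 32, LINEBITS 5):

```c
#include <stdio.h>

#define LINESIZE 32UL	/* assumed line size */
#define LINEBITS 5	/* log2(LINESIZE) */

#define spanlines(start, end) \
	((((end) - ((start) & ~(LINESIZE - 1))) >> LINEBITS) + 1)

int main(void)
{
	/* Bytes 0x100c..0x1043 touch lines 0x1000, 0x1020 and 0x1040:
	 * (0x1043 - 0x1000) >> 5 == 2, plus one for the first line == 3. */
	printf("%lu\n", spanlines(0x100cUL, 0x1043UL));
	return 0;
}
```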
arch/hexagon/mm/cache.c : "r" (start) start 51 arch/hexagon/mm/cache.c start += LINESIZE; start 60 arch/hexagon/mm/cache.c void hexagon_clean_dcache_range(unsigned long start, unsigned long end) start 62 arch/hexagon/mm/cache.c unsigned long lines = spanlines(start, end-1); start 65 arch/hexagon/mm/cache.c start &= ~(LINESIZE - 1); start 73 arch/hexagon/mm/cache.c : "r" (start) start 75 arch/hexagon/mm/cache.c start += LINESIZE; start 80 arch/hexagon/mm/cache.c void hexagon_inv_dcache_range(unsigned long start, unsigned long end) start 82 arch/hexagon/mm/cache.c unsigned long lines = spanlines(start, end-1); start 85 arch/hexagon/mm/cache.c start &= ~(LINESIZE - 1); start 93 arch/hexagon/mm/cache.c : "r" (start) start 95 arch/hexagon/mm/cache.c start += LINESIZE; start 25 arch/hexagon/mm/vm_tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 31 arch/hexagon/mm/vm_tlb.c __vmclrmap((void *)start, end - start); start 78 arch/hexagon/mm/vm_tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 80 arch/hexagon/mm/vm_tlb.c __vmclrmap((void *)start, end - start); start 1911 arch/ia64/hp/common/sba_iommu.c .start = ioc_start, start 23 arch/ia64/include/asm/cacheflush.h #define flush_cache_range(vma, start, end) do { } while (0) start 26 arch/ia64/include/asm/cacheflush.h #define flush_cache_vmap(start, end) do { } while (0) start 27 arch/ia64/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) do { } while (0) start 38 arch/ia64/include/asm/cacheflush.h extern void flush_icache_range (unsigned long start, unsigned long end); start 27 arch/ia64/include/asm/meminit.h u64 start; /* virtual address of beginning of element */ start 37 arch/ia64/include/asm/meminit.h extern int filter_rsvd_memory (u64 start, u64 end, void *arg); start 38 arch/ia64/include/asm/meminit.h extern int filter_memory (u64 start, u64 end, void *arg); start 43 arch/ia64/include/asm/meminit.h extern int reserve_elfcorehdr(u64 *start, u64 *end); start 52 arch/ia64/include/asm/meminit.h extern void call_pernode_memory (unsigned long start, unsigned long len, void *func); start 54 arch/ia64/include/asm/meminit.h # define call_pernode_memory(start, len, func) (*func)(start, len, 0) start 59 arch/ia64/include/asm/meminit.h extern int register_active_ranges(u64 start, u64 len, int nid); start 65 arch/ia64/include/asm/meminit.h extern int find_largest_hole(u64 start, u64 end, void *arg); start 66 arch/ia64/include/asm/meminit.h extern int create_mem_map_page_table(u64 start, u64 end, void *arg); start 311 arch/ia64/include/asm/pal.h start : 8, /* 47-40 lsb of data to start 332 arch/ia64/include/asm/pal.h #define pclid_write_start pclid_info_write.start start 22 arch/ia64/include/asm/patch.h extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end); start 23 arch/ia64/include/asm/patch.h extern void ia64_patch_vtop (unsigned long start, unsigned long end); start 25 arch/ia64/include/asm/patch.h extern void ia64_patch_rse (unsigned long start, unsigned long end); start 95 arch/ia64/include/asm/tlbflush.h extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end); start 122 arch/ia64/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, start 276 arch/ia64/kernel/efi.c u64 start; start 287 arch/ia64/kernel/efi.c return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT)); start 312 arch/ia64/kernel/efi.c u64 start, end, voff; start 315 arch/ia64/kernel/efi.c for (k = kern_memmap; k->start 
!= ~0UL; k++) { start 318 arch/ia64/kernel/efi.c start = PAGE_ALIGN(k->start); start 319 arch/ia64/kernel/efi.c end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK; start 320 arch/ia64/kernel/efi.c if (start < end) start 321 arch/ia64/kernel/efi.c if ((*callback)(start + voff, end + voff, arg) < 0) start 715 arch/ia64/kernel/efi.c for (md = kern_memmap; md->start != ~0UL; md++) { start 716 arch/ia64/kernel/efi.c if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) start 1072 arch/ia64/kernel/efi.c k->start = md->phys_addr; start 1111 arch/ia64/kernel/efi.c k->start = md->phys_addr; start 1130 arch/ia64/kernel/efi.c k->start = lim; start 1158 arch/ia64/kernel/efi.c k->start = as; start 1163 arch/ia64/kernel/efi.c k->start = ~0L; /* end-marker */ start 1253 arch/ia64/kernel/efi.c res->start = md->phys_addr; start 1272 arch/ia64/kernel/efi.c if (crashk_res.end > crashk_res.start) start 1287 arch/ia64/kernel/efi.c u64 start, end; start 1301 arch/ia64/kernel/efi.c start = ALIGN(md->phys_addr, alignment); start 1304 arch/ia64/kernel/efi.c if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { start 1305 arch/ia64/kernel/efi.c if (__pa(r[i].start) > start + size) start 1306 arch/ia64/kernel/efi.c return start; start 1307 arch/ia64/kernel/efi.c start = ALIGN(__pa(r[i].end), alignment); start 1309 arch/ia64/kernel/efi.c __pa(r[i+1].start) < start + size) start 1315 arch/ia64/kernel/efi.c if (end > start + size) start 1316 arch/ia64/kernel/efi.c return start; start 38 arch/ia64/kernel/machine_kexec.c .start = 0, start 45 arch/ia64/kernel/machine_kexec.c .start = 0, start 131 arch/ia64/kernel/machine_kexec.c (*rnk)(image->head, image->start, ia64_boot_param, start 849 arch/ia64/kernel/module.c struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr; start 850 arch/ia64/kernel/module.c struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start); start 855 arch/ia64/kernel/module.c for (e1 = start; e1 < end; ++e1) start 865 arch/ia64/kernel/module.c for (e1 = start; e1 < end; ++e1) { start 877 arch/ia64/kernel/module.c if (in_init(mod, start->start_offset)) { start 878 arch/ia64/kernel/module.c init = start; start 879 arch/ia64/kernel/module.c core = start + num_init; start 881 arch/ia64/kernel/module.c core = start; start 882 arch/ia64/kernel/module.c init = start + num_core; start 101 arch/ia64/kernel/patch.c ia64_patch_vtop (unsigned long start, unsigned long end) start 103 arch/ia64/kernel/patch.c s32 *offp = (s32 *) start; start 124 arch/ia64/kernel/patch.c ia64_patch_rse (unsigned long start, unsigned long end) start 126 arch/ia64/kernel/patch.c s32 *offp = (s32 *) start; start 142 arch/ia64/kernel/patch.c ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) start 146 arch/ia64/kernel/patch.c s32 *offp = (s32 *) start; start 173 arch/ia64/kernel/patch.c patch_fsyscall_table (unsigned long start, unsigned long end) start 176 arch/ia64/kernel/patch.c s32 *offp = (s32 *) start; start 190 arch/ia64/kernel/patch.c patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) start 193 arch/ia64/kernel/patch.c s32 *offp = (s32 *) start; start 5656 arch/ia64/kernel/perfmon.c .start = pfm_proc_start, start 1754 arch/ia64/kernel/ptrace.c int index, start, end; start 1771 arch/ia64/kernel/ptrace.c start = dst->pos; start 1780 arch/ia64/kernel/ptrace.c if (start & 0xF) { /* only write high part */ start 1781 arch/ia64/kernel/ptrace.c if (unw_get_fr(info, start / sizeof(elf_fpreg_t), start 1786 arch/ia64/kernel/ptrace.c tmp[start / 
sizeof(elf_fpreg_t) - 2].u.bits[0] start 1788 arch/ia64/kernel/ptrace.c start &= ~0xFUL; start 1801 arch/ia64/kernel/ptrace.c for ( ; start < end ; start += sizeof(elf_fpreg_t)) { start 1802 arch/ia64/kernel/ptrace.c index = start / sizeof(elf_fpreg_t); start 143 arch/ia64/kernel/setup.c filter_rsvd_memory (u64 start, u64 end, void *arg) start 150 arch/ia64/kernel/setup.c if (start == PAGE_OFFSET) { start 152 arch/ia64/kernel/setup.c start += PAGE_SIZE; start 153 arch/ia64/kernel/setup.c if (start >= end) return 0; start 163 arch/ia64/kernel/setup.c range_start = max(start, prev_start); start 164 arch/ia64/kernel/setup.c range_end = min(end, rsvd_region[i].start); start 183 arch/ia64/kernel/setup.c filter_memory(u64 start, u64 end, void *arg) start 188 arch/ia64/kernel/setup.c if (start == PAGE_OFFSET) { start 190 arch/ia64/kernel/setup.c start += PAGE_SIZE; start 191 arch/ia64/kernel/setup.c if (start >= end) start 196 arch/ia64/kernel/setup.c if (start < end) start 197 arch/ia64/kernel/setup.c call_pernode_memory(__pa(start), end - start, func); start 209 arch/ia64/kernel/setup.c if (rsvd_region[j].start > rsvd_region[j+1].start) { start 225 arch/ia64/kernel/setup.c if (rsvd_region[i].start >= rsvd_region[i-1].end) start 241 arch/ia64/kernel/setup.c code_resource.start = ia64_tpa(_text); start 243 arch/ia64/kernel/setup.c data_resource.start = ia64_tpa(_etext); start 245 arch/ia64/kernel/setup.c bss_resource.start = ia64_tpa(__bss_start); start 306 arch/ia64/kernel/setup.c rsvd_region[*n].start = start 311 arch/ia64/kernel/setup.c crashk_res.start = base; start 315 arch/ia64/kernel/setup.c efi_memmap_res.start = ia64_boot_param->efi_memmap; start 316 arch/ia64/kernel/setup.c efi_memmap_res.end = efi_memmap_res.start + start 318 arch/ia64/kernel/setup.c boot_param_res.start = __pa(ia64_boot_param); start 319 arch/ia64/kernel/setup.c boot_param_res.end = boot_param_res.start + start 343 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) ia64_boot_param; start 344 arch/ia64/kernel/setup.c rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); start 347 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); start 348 arch/ia64/kernel/setup.c rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; start 351 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); start 352 arch/ia64/kernel/setup.c rsvd_region[n].end = (rsvd_region[n].start start 356 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); start 362 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); start 363 arch/ia64/kernel/setup.c rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; start 369 arch/ia64/kernel/setup.c if (reserve_elfcorehdr(&rsvd_region[n].start, start 374 arch/ia64/kernel/setup.c total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); start 380 arch/ia64/kernel/setup.c rsvd_region[n].start = ~0UL; start 393 arch/ia64/kernel/setup.c phys_addr_t addr = __pa(region->start); start 394 arch/ia64/kernel/setup.c phys_addr_t size = region->end - region->start; start 525 arch/ia64/kernel/setup.c int __init reserve_elfcorehdr(u64 *start, u64 *end) start 543 arch/ia64/kernel/setup.c *start = (unsigned long)__va(elfcorehdr_addr); start 544 arch/ia64/kernel/setup.c *end = *start + length; start 763 arch/ia64/kernel/setup.c .start = c_start, start 410 
arch/ia64/kernel/time.c unsigned long start = ia64_get_itc(); start 411 arch/ia64/kernel/time.c unsigned long end = start + usecs*local_cpu_data->cyc_per_usec; start 1540 arch/ia64/kernel/unwind.c STAT(unsigned long start, parse_start;) start 1542 arch/ia64/kernel/unwind.c STAT(++unw.stat.script.builds; start = ia64_get_itc()); start 1554 arch/ia64/kernel/unwind.c STAT(unw.stat.script.build_time += ia64_get_itc() - start); start 1565 arch/ia64/kernel/unwind.c if (ip >= table->start && ip < table->end) { start 1594 arch/ia64/kernel/unwind.c STAT(unw.stat.script.build_time += ia64_get_itc() - start); start 1710 arch/ia64/kernel/unwind.c STAT(unw.stat.script.build_time += ia64_get_itc() - start); start 1725 arch/ia64/kernel/unwind.c STAT(unsigned long start;) start 1727 arch/ia64/kernel/unwind.c STAT(++unw.stat.script.runs; start = ia64_get_itc()); start 1817 arch/ia64/kernel/unwind.c STAT(unw.stat.script.run_time += ia64_get_itc() - start); start 1887 arch/ia64/kernel/unwind.c STAT(unsigned long start, flags;) start 1890 arch/ia64/kernel/unwind.c STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc()); start 1901 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 1908 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 1915 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 1937 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 1946 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 1953 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 1964 arch/ia64/kernel/unwind.c STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); start 2005 arch/ia64/kernel/unwind.c STAT(unsigned long start, flags;) start 2007 arch/ia64/kernel/unwind.c STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc()); start 2048 arch/ia64/kernel/unwind.c STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); start 2085 arch/ia64/kernel/unwind.c const struct unw_table_entry *start = table_start, *end = table_end; start 2090 arch/ia64/kernel/unwind.c table->start = segment_base + start[0].start_offset; start 2092 arch/ia64/kernel/unwind.c table->array = start; start 2093 arch/ia64/kernel/unwind.c table->length = end - start; start 2100 arch/ia64/kernel/unwind.c const struct unw_table_entry *start = table_start, *end = table_end; start 2104 arch/ia64/kernel/unwind.c if (end - start <= 0) { start 2170 arch/ia64/kernel/unwind.c || tmp->ip < table->start || tmp->ip >= table->end) start 2175 arch/ia64/kernel/unwind.c if (tmp->ip >= table->start && tmp->ip < table->end) { start 2189 arch/ia64/kernel/unwind.c const struct unw_table_entry *entry, *start, *end; start 2207 arch/ia64/kernel/unwind.c start = (const struct unw_table_entry *) punw->p_vaddr; start 2208 arch/ia64/kernel/unwind.c end = (struct unw_table_entry *) ((char *) start + punw->p_memsz); start 2211 arch/ia64/kernel/unwind.c unw_add_unwind_table("linux-gate.so", segbase, 0, start, end); start 2213 arch/ia64/kernel/unwind.c for (entry = start; entry < end; ++entry) start 2228 arch/ia64/kernel/unwind.c for (entry = start; entry < end; ++entry, lp += 3) { start 54 arch/ia64/kernel/unwind_i.h 
unsigned long start; start 71 arch/ia64/mm/discontig.c static int __init build_node_maps(unsigned long start, unsigned long len, start 74 arch/ia64/mm/discontig.c unsigned long spfn, epfn, end = start + len; start 77 arch/ia64/mm/discontig.c spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT; start 313 arch/ia64/mm/discontig.c static int __init find_pernode_space(unsigned long start, unsigned long len, start 319 arch/ia64/mm/discontig.c spfn = start >> PAGE_SHIFT; start 320 arch/ia64/mm/discontig.c epfn = (start + len) >> PAGE_SHIFT; start 338 arch/ia64/mm/discontig.c pernode = NODEDATA_ALIGN(start, node); start 341 arch/ia64/mm/discontig.c if (start + len > (pernode + pernodesize)) start 555 arch/ia64/mm/discontig.c void call_pernode_memory(unsigned long start, unsigned long len, void *arg) start 557 arch/ia64/mm/discontig.c unsigned long rs, re, end = start + len; start 561 arch/ia64/mm/discontig.c start = PAGE_ALIGN(start); start 563 arch/ia64/mm/discontig.c if (start >= end) start 570 arch/ia64/mm/discontig.c if (start < end) start 571 arch/ia64/mm/discontig.c (*func)(start, end - start, 0); start 576 arch/ia64/mm/discontig.c rs = max(start, node_memblk[i].start_paddr); start 656 arch/ia64/mm/discontig.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 659 arch/ia64/mm/discontig.c return vmemmap_populate_basepages(start, end, node); start 662 arch/ia64/mm/discontig.c void vmemmap_free(unsigned long start, unsigned long end, start 158 arch/ia64/mm/init.c free_initrd_mem (unsigned long start, unsigned long end) start 191 arch/ia64/mm/init.c start = PAGE_ALIGN(start); start 194 arch/ia64/mm/init.c if (start < end) start 195 arch/ia64/mm/init.c printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10); start 197 arch/ia64/mm/init.c for (; start < end; start += PAGE_SIZE) { start 198 arch/ia64/mm/init.c if (!virt_addr_valid(start)) start 200 arch/ia64/mm/init.c free_reserved_page(virt_to_page(start)); start 427 arch/ia64/mm/init.c int __init create_mem_map_page_table(u64 start, u64 end, void *arg) start 437 arch/ia64/mm/init.c map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); start 442 arch/ia64/mm/init.c node = paddr_to_nid(__pa(start)); start 488 arch/ia64/mm/init.c struct page *start; start 495 arch/ia64/mm/init.c virtual_memmap_init(u64 start, u64 end, void *arg) start 501 arch/ia64/mm/init.c map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); start 504 arch/ia64/mm/init.c if (map_start < args->start) start 505 arch/ia64/mm/init.c map_start = args->start; start 533 arch/ia64/mm/init.c struct page *start; start 536 arch/ia64/mm/init.c start = pfn_to_page(start_pfn); start 537 arch/ia64/mm/init.c args.start = start; start 538 arch/ia64/mm/init.c args.end = start + size; start 558 arch/ia64/mm/init.c int __init find_largest_hole(u64 start, u64 end, void *arg) start 566 arch/ia64/mm/init.c if (*max_gap < (start - last_end)) start 567 arch/ia64/mm/init.c *max_gap = start - last_end; start 574 arch/ia64/mm/init.c int __init register_active_ranges(u64 start, u64 len, int nid) start 576 arch/ia64/mm/init.c u64 end = start + len; start 579 arch/ia64/mm/init.c if (start > crashk_res.start && start < crashk_res.end) start 580 arch/ia64/mm/init.c start = crashk_res.end; start 581 arch/ia64/mm/init.c if (end > crashk_res.start && end < crashk_res.end) start 582 arch/ia64/mm/init.c end = crashk_res.start; start 585 arch/ia64/mm/init.c if (start < end) start 586 arch/ia64/mm/init.c memblock_add_node(__pa(start), end - start, nid); start 591 
arch/ia64/mm/init.c find_max_min_low_pfn (u64 start, u64 end, void *arg) start 595 arch/ia64/mm/init.c pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT; start 598 arch/ia64/mm/init.c pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT; start 672 arch/ia64/mm/init.c int arch_add_memory(int nid, u64 start, u64 size, start 675 arch/ia64/mm/init.c unsigned long start_pfn = start >> PAGE_SHIFT; start 687 arch/ia64/mm/init.c void arch_remove_memory(int nid, u64 start, u64 size, start 690 arch/ia64/mm/init.c unsigned long start_pfn = start >> PAGE_SHIFT; start 250 arch/ia64/mm/tlb.c ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, start 274 arch/ia64/mm/tlb.c ia64_ptcga(start, (nbits << 2)); start 276 arch/ia64/mm/tlb.c start += (1UL << nbits); start 277 arch/ia64/mm/tlb.c } while (start < end); start 312 arch/ia64/mm/tlb.c __flush_tlb_range (struct vm_area_struct *vma, unsigned long start, start 316 arch/ia64/mm/tlb.c unsigned long size = end - start; start 332 arch/ia64/mm/tlb.c start &= ~((1UL << nbits) - 1); start 337 arch/ia64/mm/tlb.c ia64_global_tlb_purge(mm, start, end, nbits); start 343 arch/ia64/mm/tlb.c ia64_ptcl(start, (nbits<<2)); start 344 arch/ia64/mm/tlb.c start += (1UL << nbits); start 345 arch/ia64/mm/tlb.c } while (start < end); start 351 arch/ia64/mm/tlb.c unsigned long start, unsigned long end) start 353 arch/ia64/mm/tlb.c if (unlikely(end - start >= 1024*1024*1024*1024UL start 354 arch/ia64/mm/tlb.c || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) { start 363 arch/ia64/mm/tlb.c __flush_tlb_range(vma, start, end); start 365 arch/ia64/mm/tlb.c __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end)); start 85 arch/ia64/oprofile/perfmon.c ops->start = perfmon_start; start 70 arch/ia64/pci/fixup.c res->start = 0xC0000; start 71 arch/ia64/pci/fixup.c res->end = res->start + 0x20000 - 1; start 175 arch/ia64/pci/pci.c min = res->start - entry->offset; start 193 arch/ia64/pci/pci.c resource->start = base + (sparse ? 
IO_SPACE_SPARSE_ENCODING(min) : min); start 203 arch/ia64/pci/pci.c res->start = min + base_port; start 231 arch/ia64/pci/pci.c res->start == 0xCF8 && res->end == 0xCFF; start 299 arch/ia64/pci/pci.c root->segment, (int)root->secondary.start); start 336 arch/ia64/pci/pci.c if (!r->flags || r->parent || !r->start) start 354 arch/ia64/pci/pci.c if (!r->flags || r->parent || !r->start) start 28 arch/m68k/amiga/chipram.c .name = "Chip RAM", .start = CHIP_PHYSADDR start 91 arch/m68k/amiga/chipram.c return ZTWO_VADDR(res->start); start 96 arch/m68k/amiga/chipram.c unsigned long start = ZTWO_PADDR(ptr); start 100 arch/m68k/amiga/chipram.c res = lookup_resource(&chipram_res, start); start 123 arch/m68k/amiga/config.c .name = "CIA B", .start = 0x00bfd000, .end = 0x00bfdfff start 126 arch/m68k/amiga/config.c .name = "CIA A", .start = 0x00bfe000, .end = 0x00bfefff start 129 arch/m68k/amiga/config.c .name = "Custom I/O", .start = 0x00dff000, .end = 0x00dfffff start 132 arch/m68k/amiga/config.c .name = "Kickstart ROM", .start = 0x00f80000, .end = 0x00ffffff start 445 arch/m68k/amiga/config.c ram_resource[i].start = m68k_memory[i].addr; start 492 arch/m68k/amiga/config.c .name = "timer", .start = 0x00bfd400, .end = 0x00bfd5ff, start 25 arch/m68k/amiga/platform.c .start = 0x00e80000, start 30 arch/m68k/amiga/platform.c .start = 0x00200000, start 37 arch/m68k/amiga/platform.c .start = 0xff000000, start 42 arch/m68k/amiga/platform.c .start = 0x40000000, start 88 arch/m68k/amiga/platform.c .start = 0xdd0000, start 95 arch/m68k/amiga/platform.c .start = 0xdd0000, start 102 arch/m68k/amiga/platform.c .start = 0xda0000, start 115 arch/m68k/amiga/platform.c .start = 0xdd2000, start 128 arch/m68k/amiga/platform.c .start = 0x00dc0000, start 678 arch/m68k/atari/config.c .start = ATARI_ETHERNAT_PHYS_ADDR, start 684 arch/m68k/atari/config.c .start = ATARI_ETHERNAT_IRQ, start 707 arch/m68k/atari/config.c .start = ATARI_USB_PHYS_ADDR, start 713 arch/m68k/atari/config.c .start = ATARI_USB_PHYS_ADDR + 0x4, start 719 arch/m68k/atari/config.c .start = ATARI_USB_IRQ, start 772 arch/m68k/atari/config.c .start = ATARI_ETHERNEC_BASE, start 778 arch/m68k/atari/config.c .start = ATARI_ETHERNEC_IRQ, start 802 arch/m68k/atari/config.c .start = ATARI_NETUSBEE_BASE, start 808 arch/m68k/atari/config.c .start = ATARI_NETUSBEE_BASE + 0x20, start 814 arch/m68k/atari/config.c .start = ATARI_NETUSBEE_IRQ, start 858 arch/m68k/atari/config.c .start = IRQ_MFP_FSCSI, start 866 arch/m68k/atari/config.c .start = IRQ_TT_MFP_SCSI, start 98 arch/m68k/atari/stram.c stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size, start 100 arch/m68k/atari/stram.c if (!stram_pool.start) start 104 arch/m68k/atari/stram.c stram_pool.end = stram_pool.start + pool_size - 1; start 126 arch/m68k/atari/stram.c stram_pool.start = PAGE_SIZE; start 127 arch/m68k/atari/stram.c stram_pool.end = stram_pool.start + pool_size - 1; start 129 arch/m68k/atari/stram.c stram_virt_offset = (unsigned long) ioremap(stram_pool.start, start 130 arch/m68k/atari/stram.c resource_size(&stram_pool)) - stram_pool.start; start 180 arch/m68k/atari/stram.c return atari_stram_to_virt(res->start); start 187 arch/m68k/atari/stram.c unsigned long start = atari_stram_to_phys(addr); start 191 arch/m68k/atari/stram.c res = lookup_resource(&stram_pool, start); start 37 arch/m68k/coldfire/amcore.c .start = DM9000_ADDR, start 46 arch/m68k/coldfire/amcore.c .start = DM9000_ADDR + 4, start 52 arch/m68k/coldfire/amcore.c .start = DM9000_IRQ, start 107 arch/m68k/coldfire/amcore.c .start = 
0xffc00000, start 115 arch/m68k/coldfire/device.c .start = MCFFEC_BASE0, start 120 arch/m68k/coldfire/device.c .start = MCF_IRQ_FECRX0, start 125 arch/m68k/coldfire/device.c .start = MCF_IRQ_FECTX0, start 130 arch/m68k/coldfire/device.c .start = MCF_IRQ_FECENTC0, start 151 arch/m68k/coldfire/device.c .start = MCFFEC_BASE1, start 156 arch/m68k/coldfire/device.c .start = MCF_IRQ_FECRX1, start 161 arch/m68k/coldfire/device.c .start = MCF_IRQ_FECTX1, start 166 arch/m68k/coldfire/device.c .start = MCF_IRQ_FECENTC1, start 193 arch/m68k/coldfire/device.c .start = MCFQSPI_BASE, start 198 arch/m68k/coldfire/device.c .start = MCF_IRQ_QSPI, start 344 arch/m68k/coldfire/device.c .start = MCFI2C_BASE0, start 349 arch/m68k/coldfire/device.c .start = MCF_IRQ_I2C0, start 365 arch/m68k/coldfire/device.c .start = MCFI2C_BASE1, start 370 arch/m68k/coldfire/device.c .start = MCF_IRQ_I2C1, start 389 arch/m68k/coldfire/device.c .start = MCFI2C_BASE2, start 394 arch/m68k/coldfire/device.c .start = MCF_IRQ_I2C2, start 413 arch/m68k/coldfire/device.c .start = MCFI2C_BASE3, start 418 arch/m68k/coldfire/device.c .start = MCF_IRQ_I2C3, start 437 arch/m68k/coldfire/device.c .start = MCFI2C_BASE4, start 442 arch/m68k/coldfire/device.c .start = MCF_IRQ_I2C4, start 461 arch/m68k/coldfire/device.c .start = MCFI2C_BASE5, start 466 arch/m68k/coldfire/device.c .start = MCF_IRQ_I2C5, start 511 arch/m68k/coldfire/device.c .start = MCFEDMA_BASE, start 516 arch/m68k/coldfire/device.c .start = MCFEDMA_IRQ_INTR0, start 522 arch/m68k/coldfire/device.c .start = MCFEDMA_IRQ_INTR16, start 528 arch/m68k/coldfire/device.c .start = MCFEDMA_IRQ_INTR56, start 534 arch/m68k/coldfire/device.c .start = MCFEDMA_IRQ_ERR, start 62 arch/m68k/coldfire/firebee.c .start = FLASH_PHYS_ADDR, start 53 arch/m68k/coldfire/m5249.c .start = 0xe0000300, start 58 arch/m68k/coldfire/m5249.c .start = MCF_IRQ_GPIO6, start 20 arch/m68k/coldfire/mcf8390.c .start = NE2000_ADDR, start 25 arch/m68k/coldfire/mcf8390.c .start = NE2000_IRQ_VECTOR, start 45 arch/m68k/coldfire/nettel.c .start = NETTEL_SMC0_ADDR, start 50 arch/m68k/coldfire/nettel.c .start = NETTEL_SMC0_IRQ, start 58 arch/m68k/coldfire/nettel.c .start = NETTEL_SMC1_ADDR, start 63 arch/m68k/coldfire/nettel.c .start = NETTEL_SMC1_IRQ, start 134 arch/m68k/coldfire/pci.c .start = PCI_MEM_PA, start 141 arch/m68k/coldfire/pci.c .start = 0x400, start 148 arch/m68k/coldfire/pci.c .start = 0, start 65 arch/m68k/coldfire/stmark2.c .start = MCFDSPI_BASE0, start 70 arch/m68k/coldfire/stmark2.c .start = 12, start 75 arch/m68k/coldfire/stmark2.c .start = MCF_IRQ_DSPI0, start 330 arch/m68k/fpsp040/fpsp.h .set BUSY_FRAME,LV-BUSY_SIZE | start of busy frame start 336 arch/m68k/fpsp040/fpsp.h .set IDLE_FRAME,LV-IDLE_SIZE | start of idle frame start 31 arch/m68k/include/asm/cacheflush_mm.h static inline void clear_cf_icache(unsigned long start, unsigned long end) start 40 arch/m68k/include/asm/cacheflush_mm.h static inline void clear_cf_dcache(unsigned long start, unsigned long end) start 49 arch/m68k/include/asm/cacheflush_mm.h static inline void clear_cf_bcache(unsigned long start, unsigned long end) start 62 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cf_icache(unsigned long start, unsigned long end) start 66 arch/m68k/include/asm/cacheflush_mm.h for (set = start; set <= end; set += (0x10 - 3)) { start 80 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cf_dcache(unsigned long start, unsigned long end) start 84 arch/m68k/include/asm/cacheflush_mm.h for (set = start; set <= end; set += (0x10 
- 3)) { start 98 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cf_bcache(unsigned long start, unsigned long end) start 102 arch/m68k/include/asm/cacheflush_mm.h for (set = start; set <= end; set += (0x10 - 3)) { start 193 arch/m68k/include/asm/cacheflush_mm.h #define flush_cache_vmap(start, end) flush_cache_all() start 194 arch/m68k/include/asm/cacheflush_mm.h #define flush_cache_vunmap(start, end) flush_cache_all() start 207 arch/m68k/include/asm/cacheflush_mm.h unsigned long start, start 226 arch/m68k/include/asm/cacheflush_mm.h unsigned long addr, start, end; start 228 arch/m68k/include/asm/cacheflush_mm.h start = addr & ICACHE_SET_MASK; start 230 arch/m68k/include/asm/cacheflush_mm.h if (start > end) { start 234 arch/m68k/include/asm/cacheflush_mm.h flush_cf_bcache(start, end); start 14 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_range(vma, start, end) do { } while (0) start 16 arch/m68k/include/asm/cacheflush_no.h #define flush_dcache_range(start, len) __flush_dcache_all() start 21 arch/m68k/include/asm/cacheflush_no.h #define flush_icache_range(start, len) __flush_icache_all() start 24 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_vmap(start, end) do { } while (0) start 25 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_vunmap(start, end) do { } while (0) start 39 arch/m68k/include/asm/module.h extern void module_fixup(struct module *mod, struct m68k_fixup_info *start, start 96 arch/m68k/include/asm/tlbflush.h unsigned long start, unsigned long end) start 102 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 193 arch/m68k/include/asm/tlbflush.h unsigned long start, unsigned long end) start 198 arch/m68k/include/asm/tlbflush.h start &= ~SUN3_PMEG_MASK; start 203 arch/m68k/include/asm/tlbflush.h while(start < end) start 205 arch/m68k/include/asm/tlbflush.h if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG) start 212 arch/m68k/include/asm/tlbflush.h sun3_put_segmap(start, SUN3_INVALID_PMEG); start 214 arch/m68k/include/asm/tlbflush.h start += SUN3_PMEG_SIZE; start 218 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 267 arch/m68k/include/asm/tlbflush.h unsigned long start, unsigned long end) start 36 arch/m68k/kernel/bootinfo_proc.c const void *start = bi; start 51 arch/m68k/kernel/bootinfo_proc.c memcpy(bootinfo_tmp, start, size); start 35 arch/m68k/kernel/machine_kexec.c unsigned long start, start 53 arch/m68k/kernel/machine_kexec.c pr_info("Will call new kernel at 0x%08lx. 
Bye...\n", image->start); start 57 arch/m68k/kernel/machine_kexec.c image->start, start 112 arch/m68k/kernel/module.c void module_fixup(struct module *mod, struct m68k_fixup_info *start, start 118 arch/m68k/kernel/module.c for (fixup = start; fixup < end; fixup++) { start 32 arch/m68k/kernel/pcibios.c resource_size_t start = res->start; start 34 arch/m68k/kernel/pcibios.c if ((res->flags & IORESOURCE_IO) && (start & 0x300)) start 35 arch/m68k/kernel/pcibios.c start = (start + 0x3ff) & ~0x3ff; start 37 arch/m68k/kernel/pcibios.c start = (start + align - 1) & ~(align - 1); start 39 arch/m68k/kernel/pcibios.c return start; start 60 arch/m68k/kernel/pcibios.c if (!r->start && r->end) { start 493 arch/m68k/kernel/setup_mm.c .start = c_start, start 217 arch/m68k/kernel/setup_no.c .start = c_start, start 833 arch/m68k/mac/config.c scc_a_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase + 2; start 834 arch/m68k/mac/config.c scc_a_rsrcs[0].end = scc_a_rsrcs[0].start; start 835 arch/m68k/mac/config.c scc_b_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase; start 836 arch/m68k/mac/config.c scc_b_rsrcs[0].end = scc_b_rsrcs[0].start; start 840 arch/m68k/mac/config.c scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC_A; start 841 arch/m68k/mac/config.c scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC_B; start 846 arch/m68k/mac/config.c scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC; start 847 arch/m68k/mac/config.c scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC; start 849 arch/m68k/mac/config.c scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_AUTO_4; start 850 arch/m68k/mac/config.c scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_AUTO_4; start 908 arch/m68k/mac/config.c .start = IRQ_MAC_SCSI, start 912 arch/m68k/mac/config.c .start = 0x50008000, start 916 arch/m68k/mac/config.c .start = 0x50008000, start 924 arch/m68k/mac/config.c .start = 0xFEE02000, start 932 arch/m68k/mac/config.c .start = IRQ_MAC_SCSI, start 936 arch/m68k/mac/config.c .start = 0x50010000, start 940 arch/m68k/mac/config.c .start = 0x50006000, start 948 arch/m68k/mac/config.c .start = IRQ_MAC_SCSI, start 952 arch/m68k/mac/config.c .start = 0x50F10000, start 956 arch/m68k/mac/config.c .start = 0x50F06000, start 994 arch/m68k/mac/config.c .start = swim_base, start 79 arch/m68k/mm/cache.c unsigned long start, end; start 80 arch/m68k/mm/cache.c start = address & ICACHE_SET_MASK; start 82 arch/m68k/mm/cache.c if (start > end) { start 86 arch/m68k/mm/cache.c flush_cf_icache(start, end); start 113 arch/m68k/mm/cache.c unsigned long start, end; start 114 arch/m68k/mm/cache.c start = addr & ICACHE_SET_MASK; start 116 arch/m68k/mm/cache.c if (start > end) { start 120 arch/m68k/mm/cache.c flush_cf_icache(start, end); start 172 arch/m68k/sun3/config.c .start = SUN3_VEC_VMESCSI0, start 176 arch/m68k/sun3/config.c .start = 0xff200000, start 180 arch/m68k/sun3/config.c .start = SUN3_VEC_VMESCSI1, start 184 arch/m68k/sun3/config.c .start = 0xff204000, start 196 arch/m68k/sun3/config.c .start = 2, start 200 arch/m68k/sun3/config.c .start = 0x00140000, start 43 arch/m68k/sun3/sun3dvma.c unsigned long start; start 95 arch/m68k/sun3/sun3dvma.c if((hole->start == 0) && (hole->end == 0) && (hole->size == 0)) start 99 arch/m68k/sun3/sun3dvma.c hole->start, hole->end, hole->size); start 122 arch/m68k/sun3/sun3dvma.c if(hole->end == prev->start) { start 188 arch/m68k/sun3/sun3dvma.c dvma_entry_use(hole->start) = newlen; start 193 arch/m68k/sun3/sun3dvma.c return hole->start; start 229 arch/m68k/sun3/sun3dvma.c } else 
if(hole->start == (baddr + len)) { start 230 arch/m68k/sun3/sun3dvma.c hole->start = baddr; start 239 arch/m68k/sun3/sun3dvma.c hole->start = baddr; start 264 arch/m68k/sun3/sun3dvma.c hole->start = DVMA_START; start 43 arch/m68k/tools/amiga/dmesg.c u_long start = CHIPMEM_START, end = CHIPMEM_END, p; start 50 arch/m68k/tools/amiga/dmesg.c for (p = start; p <= end-sizeof(struct savekmsg); p += 4) { start 60 arch/microblaze/include/asm/cacheflush.h #define flush_icache_range(start, end) mbc->iflr(start, end); start 62 arch/microblaze/include/asm/cacheflush.h #define invalidate_icache_range(start, end) mbc->iinr(start, end); start 71 arch/microblaze/include/asm/cacheflush.h #define invalidate_dcache_range(start, end) mbc->dinr(start, end); start 73 arch/microblaze/include/asm/cacheflush.h #define flush_dcache_range(start, end) mbc->dflr(start, end); start 88 arch/microblaze/include/asm/cacheflush.h #define flush_cache_vmap(start, end) do { } while (0) start 89 arch/microblaze/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) do { } while (0) start 97 arch/microblaze/include/asm/cacheflush.h #define flush_cache_range(vma, start, len) { \ start 98 arch/microblaze/include/asm/cacheflush.h flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \ start 99 arch/microblaze/include/asm/cacheflush.h flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \ start 103 arch/microblaze/include/asm/cacheflush.h #define flush_cache_range(vma, start, len) do { } while (0) start 37 arch/microblaze/include/asm/tlbflush.h unsigned long start, unsigned long end) start 40 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_kernel_range(start, end) do { } while (0) start 55 arch/microblaze/include/asm/tlbflush.h unsigned long start, unsigned long end) { } start 63 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_range(mm, start, end) BUG() start 64 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_pgtables(mm, start, end) BUG() start 65 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_kernel_range(start, end) BUG() start 92 arch/microblaze/kernel/cpu/cache.c #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ start 95 arch/microblaze/kernel/cpu/cache.c if (start < UINT_MAX - cache_size) \ start 96 arch/microblaze/kernel/cpu/cache.c end = min(start + cache_size, end); \ start 97 arch/microblaze/kernel/cpu/cache.c start &= align; \ start 125 arch/microblaze/kernel/cpu/cache.c #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ start 131 arch/microblaze/kernel/cpu/cache.c count = end - start; \ start 137 arch/microblaze/kernel/cpu/cache.c : : "r" (start), "r" (count), \ start 142 arch/microblaze/kernel/cpu/cache.c #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ start 147 arch/microblaze/kernel/cpu/cache.c WARN_ON(end < start); \ start 153 arch/microblaze/kernel/cpu/cache.c : : "r" (temp), "r" (start), "r" (end), \ start 159 arch/microblaze/kernel/cpu/cache.c static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) start 166 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 168 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 175 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); start 177 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.icache_line_length) start 185 arch/microblaze/kernel/cpu/cache.c static void __flush_icache_range_nomsr_irq(unsigned long start, 
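Note: the cache-range helpers indexed around here (hexagon_clean_dcache_range, the m68k flush_cf_* routines, the microblaze CACHE_RANGE_LOOP_* / __flush_icache_range_* variants) all share one shape: align start down to the cache-line size, then step one line at a time until end. A minimal, standalone sketch of that pattern, with a hypothetical flush_one_line() stub standing in for the real per-line cache instruction (wic/wdc, CPUSHL, dccleana, ...); LINESIZE is an assumed value for illustration only.

#include <stdio.h>

#define LINESIZE 32UL   /* assumed cache-line size, illustration only */

/* Stand-in for the architecture-specific per-line cache operation. */
static void flush_one_line(unsigned long addr)
{
	printf("flush line at 0x%lx\n", addr);
}

/* Walk [start, end) one cache line at a time, as the indexed helpers do. */
static void flush_range(unsigned long start, unsigned long end)
{
	start &= ~(LINESIZE - 1);	/* align down to a line boundary */
	while (start < end) {
		flush_one_line(start);
		start += LINESIZE;
	}
}

int main(void)
{
	flush_range(0x1005, 0x1085);	/* touches the lines at 0x1000..0x1080 */
	return 0;
}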
start 193 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 195 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 202 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); start 204 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.icache_line_length) start 213 arch/microblaze/kernel/cpu/cache.c static void __flush_icache_range_noirq(unsigned long start, start 220 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 222 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 225 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); start 227 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.icache_line_length) start 377 arch/microblaze/kernel/cpu/cache.c static void __invalidate_dcache_range_wb(unsigned long start, start 384 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 386 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 389 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); start 391 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.dcache_line_length) start 397 arch/microblaze/kernel/cpu/cache.c static void __invalidate_dcache_range_nomsr_wt(unsigned long start, start 404 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 405 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 409 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); start 411 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.dcache_line_length) start 417 arch/microblaze/kernel/cpu/cache.c static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, start 425 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 426 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 433 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); start 435 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.dcache_line_length) start 444 arch/microblaze/kernel/cpu/cache.c static void __invalidate_dcache_range_nomsr_irq(unsigned long start, start 452 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 454 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 461 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); start 463 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.dcache_line_length) start 489 arch/microblaze/kernel/cpu/cache.c static void __flush_dcache_range_wb(unsigned long start, unsigned long end) start 495 arch/microblaze/kernel/cpu/cache.c (unsigned int)start, (unsigned int) end); start 497 arch/microblaze/kernel/cpu/cache.c CACHE_LOOP_LIMITS(start, end, start 500 arch/microblaze/kernel/cpu/cache.c CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); start 502 arch/microblaze/kernel/cpu/cache.c for (i = start; i < end; i += cpuinfo.dcache_line_length) start 154 arch/microblaze/kernel/cpu/mb.c .start = c_start, start 196 arch/microblaze/pci/pci-common.c if (offset < (rp->start & PAGE_MASK) || start 235 arch/microblaze/pci/pci-common.c if (offset < rp->start || (offset + size) > rp->end) 
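Note: the pci-common.c hits just above, and the many ".start = ..." initializers throughout this index, revolve around the same idea: a resource is a closed [start, end] range, and callers test whether an offset/size pair falls inside it. A simplified, self-contained sketch of that containment test follows; struct res_window and range_fits() are hypothetical stand-ins for the kernel's struct resource and the open-coded check shown in the listing, with only the start/end field names carried over.

#include <stdbool.h>
#include <stdio.h>

/* Cut-down illustration of a [start, end] resource window (end inclusive). */
struct res_window {
	unsigned long start;
	unsigned long end;
};

/* Mirrors the indexed check: reject a request that begins before the
 * window or runs past its end. */
static bool range_fits(const struct res_window *rp,
		       unsigned long offset, unsigned long size)
{
	return !(offset < rp->start || (offset + size) > rp->end);
}

int main(void)
{
	struct res_window io = { .start = 0x1000, .end = 0x1fff };

	printf("%d\n", range_fits(&io, 0x1800, 0x100));	/* 1: inside */
	printf("%d\n", range_fits(&io, 0x1f00, 0x200));	/* 0: overruns the end */
	return 0;
}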
start 275 arch/microblaze/pci/pci-common.c if (offset < rp->start || (offset + size) > rp->end) start 346 arch/microblaze/pci/pci-common.c if (roffset < rp->start || (roffset + size) > rp->end) start 361 arch/microblaze/pci/pci-common.c resource_size_t *start, resource_size_t *end) start 368 arch/microblaze/pci/pci-common.c *start = region.start; start 380 arch/microblaze/pci/pci-common.c *start = rsrc->start; start 530 arch/microblaze/pci/pci-common.c res->start = range.cpu_addr; start 574 arch/microblaze/pci/pci-common.c if (res->start == 0) { start 577 arch/microblaze/pci/pci-common.c (unsigned long long)res->start, start 581 arch/microblaze/pci/pci-common.c res->end -= res->start; start 582 arch/microblaze/pci/pci-common.c res->start = 0; start 589 arch/microblaze/pci/pci-common.c (unsigned long long)res->start, start 615 arch/microblaze/pci/pci-common.c if (p->end < res->start) start 617 arch/microblaze/pci/pci-common.c if (res->end < p->start) start 619 arch/microblaze/pci/pci-common.c if (p->start < res->start || p->end > res->end) start 635 arch/microblaze/pci/pci-common.c (unsigned long long)p->start, start 685 arch/microblaze/pci/pci-common.c || res->start > res->end || res->parent) start 710 arch/microblaze/pci/pci-common.c (unsigned long long)res->start, start 737 arch/microblaze/pci/pci-common.c res->start = res->end = 0; start 751 arch/microblaze/pci/pci-common.c (unsigned long long)r->start, start 763 arch/microblaze/pci/pci-common.c (unsigned long long)pr->start, start 768 arch/microblaze/pci/pci-common.c r->end -= r->start; start 769 arch/microblaze/pci/pci-common.c r->start = 0; start 838 arch/microblaze/pci/pci-common.c res->start = offset; start 856 arch/microblaze/pci/pci-common.c if ((pres->start - offset) <= 0xa0000 && start 866 arch/microblaze/pci/pci-common.c res->start = 0xa0000 + offset; start 913 arch/microblaze/pci/pci-common.c res->start = (res->start + io_offset) & 0xffffffffu; start 921 arch/microblaze/pci/pci-common.c res->start = (unsigned long)hose->io_base_virt - isa_io_base; start 922 arch/microblaze/pci/pci-common.c res->end = res->start + IO_SPACE_LIMIT; start 929 arch/microblaze/pci/pci-common.c (unsigned long long)res->start, start 944 arch/microblaze/pci/pci-common.c res->start = hose->pci_mem_offset; start 952 arch/microblaze/pci/pci-common.c i, (unsigned long long)res->start, start 981 arch/microblaze/pci/pci-common.c bus->busn_res.start = hose->first_busno; start 57 arch/microblaze/pci/xilinx_pci.c dev->resource[i].start = 0; start 145 arch/microblaze/pci/xilinx_pci.c setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, start 146 arch/microblaze/pci/xilinx_pci.c r.start + XPLB_PCI_DATA, start 99 arch/mips/alchemy/board-gpr.c .start = 1, start 164 arch/mips/alchemy/board-gpr.c .start = 0x1e000000, start 252 arch/mips/alchemy/board-gpr.c .start = AU1500_PCI_PHYS_ADDR, start 189 arch/mips/alchemy/board-mtx1.c .start = 0x1e000000, start 205 arch/mips/alchemy/board-mtx1.c .start = AU1500_PCI_PHYS_ADDR, start 95 arch/mips/alchemy/board-xxs1500.c .start = AU1000_PCMCIA_IO_PHYS_ADDR, start 101 arch/mips/alchemy/board-xxs1500.c .start = AU1000_PCMCIA_ATTR_PHYS_ADDR, start 107 arch/mips/alchemy/board-xxs1500.c .start = AU1000_PCMCIA_MEM_PHYS_ADDR, start 100 arch/mips/alchemy/common/dma.c int au1000_dma_read_proc(char *buf, char **start, off_t fpos, start 114 arch/mips/alchemy/common/dma.c *start = buf; start 118 arch/mips/alchemy/common/dma.c *start = buf + fpos; start 224 arch/mips/alchemy/common/platform.c res[0].start = alchemy_ohci_data[ctype][0]; start 
225 arch/mips/alchemy/common/platform.c res[0].end = res[0].start + 0x100 - 1; start 227 arch/mips/alchemy/common/platform.c res[1].start = alchemy_ohci_data[ctype][1]; start 228 arch/mips/alchemy/common/platform.c res[1].end = res[1].start; start 244 arch/mips/alchemy/common/platform.c res[0].start = alchemy_ehci_data[ctype][0]; start 245 arch/mips/alchemy/common/platform.c res[0].end = res[0].start + 0x100 - 1; start 247 arch/mips/alchemy/common/platform.c res[1].start = alchemy_ehci_data[ctype][1]; start 248 arch/mips/alchemy/common/platform.c res[1].end = res[1].start; start 264 arch/mips/alchemy/common/platform.c res[0].start = AU1300_USB_OHCI1_PHYS_ADDR; start 265 arch/mips/alchemy/common/platform.c res[0].end = res[0].start + 0x100 - 1; start 267 arch/mips/alchemy/common/platform.c res[1].start = AU1300_USB_INT; start 268 arch/mips/alchemy/common/platform.c res[1].end = res[1].start; start 284 arch/mips/alchemy/common/platform.c .start = _base, \ start 289 arch/mips/alchemy/common/platform.c .start = _enable, \ start 294 arch/mips/alchemy/common/platform.c .start = _irq, \ start 299 arch/mips/alchemy/common/platform.c .start = _macdma, \ start 69 arch/mips/alchemy/common/setup.c ioport_resource.start = IOPORT_RESOURCE_START; start 71 arch/mips/alchemy/common/setup.c iomem_resource.start = IOMEM_RESOURCE_START; start 79 arch/mips/alchemy/common/setup.c unsigned long start = ALCHEMY_PCI_MEMWIN_START; start 87 arch/mips/alchemy/common/setup.c if (phys_addr >= start && (phys_addr + size - 1) <= end) start 76 arch/mips/alchemy/devboards/db1000.c .start = AU1500_PCI_PHYS_ADDR, start 101 arch/mips/alchemy/devboards/db1000.c .start = AU1100_LCD_PHYS_ADDR, start 106 arch/mips/alchemy/devboards/db1000.c .start = AU1100_LCD_INT, start 125 arch/mips/alchemy/devboards/db1000.c .start = AU1000_AC97_PHYS_ADDR, start 130 arch/mips/alchemy/devboards/db1000.c .start = DMA_ID_AC97C_TX, start 135 arch/mips/alchemy/devboards/db1000.c .start = DMA_ID_AC97C_RX, start 311 arch/mips/alchemy/devboards/db1000.c .start = AU1100_SD0_PHYS_ADDR, start 316 arch/mips/alchemy/devboards/db1000.c .start = AU1100_SD_INT, start 321 arch/mips/alchemy/devboards/db1000.c .start = DMA_ID_SD0_TX, start 326 arch/mips/alchemy/devboards/db1000.c .start = DMA_ID_SD0_RX, start 346 arch/mips/alchemy/devboards/db1000.c .start = AU1100_SD1_PHYS_ADDR, start 351 arch/mips/alchemy/devboards/db1000.c .start = AU1100_SD_INT, start 356 arch/mips/alchemy/devboards/db1000.c .start = DMA_ID_SD1_TX, start 361 arch/mips/alchemy/devboards/db1000.c .start = DMA_ID_SD1_RX, start 244 arch/mips/alchemy/devboards/db1200.c .start = DB1200_NAND_PHYS_ADDR, start 270 arch/mips/alchemy/devboards/db1200.c .start = DB1200_ETH_PHYS_ADDR, start 275 arch/mips/alchemy/devboards/db1200.c .start = DB1200_ETH_INT, start 300 arch/mips/alchemy/devboards/db1200.c .start = DB1200_IDE_PHYS_ADDR, start 305 arch/mips/alchemy/devboards/db1200.c .start = DB1200_IDE_PHYS_ADDR + IDE_ALT_START, start 310 arch/mips/alchemy/devboards/db1200.c .start = DB1200_IDE_INT, start 535 arch/mips/alchemy/devboards/db1200.c .start = AU1100_SD0_PHYS_ADDR, start 540 arch/mips/alchemy/devboards/db1200.c .start = AU1200_SD_INT, start 545 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_SDMS_TX0, start 550 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_SDMS_RX0, start 570 arch/mips/alchemy/devboards/db1200.c .start = AU1100_SD1_PHYS_ADDR, start 575 arch/mips/alchemy/devboards/db1200.c .start = AU1200_SD_INT, start 580 arch/mips/alchemy/devboards/db1200.c .start = 
AU1200_DSCR_CMD0_SDMS_TX1, start 585 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_SDMS_RX1, start 634 arch/mips/alchemy/devboards/db1200.c .start = AU1200_LCD_PHYS_ADDR, start 639 arch/mips/alchemy/devboards/db1200.c .start = AU1200_LCD_INT, start 661 arch/mips/alchemy/devboards/db1200.c .start = AU1550_PSC0_PHYS_ADDR, start 666 arch/mips/alchemy/devboards/db1200.c .start = AU1200_PSC0_INT, start 671 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_PSC0_TX, start 676 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_PSC0_RX, start 717 arch/mips/alchemy/devboards/db1200.c .start = AU1550_PSC1_PHYS_ADDR, start 722 arch/mips/alchemy/devboards/db1200.c .start = AU1200_PSC1_INT, start 727 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_PSC1_TX, start 732 arch/mips/alchemy/devboards/db1200.c .start = AU1200_DSCR_CMD0_PSC1_RX, start 799 arch/mips/alchemy/devboards/db1200.c db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR; start 801 arch/mips/alchemy/devboards/db1200.c db1200_ide_res[0].start = PB1200_IDE_PHYS_ADDR; start 803 arch/mips/alchemy/devboards/db1200.c db1200_eth_res[0].start = PB1200_ETH_PHYS_ADDR; start 209 arch/mips/alchemy/devboards/db1300.c .start = DB1300_NAND_PHYS_ADDR, start 229 arch/mips/alchemy/devboards/db1300.c .start = DB1300_ETH_PHYS_ADDR, start 234 arch/mips/alchemy/devboards/db1300.c .start = DB1300_ETH_INT, start 261 arch/mips/alchemy/devboards/db1300.c .start = AU1300_PSC1_PHYS_ADDR, start 266 arch/mips/alchemy/devboards/db1300.c .start = AU1300_PSC1_INT, start 271 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_PSC1_TX, start 276 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_PSC1_RX, start 293 arch/mips/alchemy/devboards/db1300.c .start = AU1300_PSC2_PHYS_ADDR, start 298 arch/mips/alchemy/devboards/db1300.c .start = AU1300_PSC2_INT, start 303 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_PSC2_TX, start 308 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_PSC2_RX, start 325 arch/mips/alchemy/devboards/db1300.c .start = AU1300_PSC3_PHYS_ADDR, start 330 arch/mips/alchemy/devboards/db1300.c .start = AU1300_PSC3_INT, start 335 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_PSC3_TX, start 340 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_PSC3_RX, start 425 arch/mips/alchemy/devboards/db1300.c .start = DB1300_IDE_PHYS_ADDR, start 430 arch/mips/alchemy/devboards/db1300.c .start = DB1300_IDE_PHYS_ADDR + IDE_ALT_START, start 435 arch/mips/alchemy/devboards/db1300.c .start = DB1300_IDE_INT, start 544 arch/mips/alchemy/devboards/db1300.c .start = AU1300_SD1_PHYS_ADDR, start 549 arch/mips/alchemy/devboards/db1300.c .start = AU1300_SD1_INT, start 554 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_SDMS_TX1, start 559 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_SDMS_RX1, start 611 arch/mips/alchemy/devboards/db1300.c .start = AU1100_SD0_PHYS_ADDR, start 616 arch/mips/alchemy/devboards/db1300.c .start = AU1300_SD0_INT, start 621 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_SDMS_TX0, start 626 arch/mips/alchemy/devboards/db1300.c .start = AU1300_DSCR_CMD0_SDMS_RX0, start 708 arch/mips/alchemy/devboards/db1300.c .start = AU1200_LCD_PHYS_ADDR, start 713 arch/mips/alchemy/devboards/db1300.c .start = AU1300_LCD_INT, start 186 arch/mips/alchemy/devboards/db1550.c .start = 0x20000000, start 239 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC0_PHYS_ADDR, start 244 
arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC0_INT, start 249 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC0_TX, start 254 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC0_RX, start 291 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC1_PHYS_ADDR, start 296 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC1_INT, start 301 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC1_TX, start 306 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC1_RX, start 322 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC2_PHYS_ADDR, start 327 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC2_INT, start 332 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC2_TX, start 337 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC2_RX, start 354 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC3_PHYS_ADDR, start 359 arch/mips/alchemy/devboards/db1550.c .start = AU1550_PSC3_INT, start 364 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC3_TX, start 369 arch/mips/alchemy/devboards/db1550.c .start = AU1550_DSCR_CMD0_PSC3_RX, start 467 arch/mips/alchemy/devboards/db1550.c .start = AU1500_PCI_PHYS_ADDR, start 119 arch/mips/alchemy/devboards/platform.c sr[0].start = pcmcia_attr_start; start 124 arch/mips/alchemy/devboards/platform.c sr[1].start = pcmcia_mem_start; start 129 arch/mips/alchemy/devboards/platform.c sr[2].start = pcmcia_io_start; start 134 arch/mips/alchemy/devboards/platform.c sr[3].start = sr[3].end = cd_irq; start 138 arch/mips/alchemy/devboards/platform.c sr[4].start = sr[4].end = card_irq; start 144 arch/mips/alchemy/devboards/platform.c sr[i].start = sr[i].end = stschg_irq; start 150 arch/mips/alchemy/devboards/platform.c sr[i].start = sr[i].end = eject_irq; start 199 arch/mips/alchemy/devboards/platform.c res->start = 0x20000000 - size; start 92 arch/mips/ar7/platform.c .start = AR7_REGS_VLYNQ0, start 98 arch/mips/ar7/platform.c .start = 29, start 104 arch/mips/ar7/platform.c .start = 0x04000000, start 110 arch/mips/ar7/platform.c .start = 80, start 119 arch/mips/ar7/platform.c .start = AR7_REGS_VLYNQ1, start 125 arch/mips/ar7/platform.c .start = 33, start 131 arch/mips/ar7/platform.c .start = 0x0c000000, start 137 arch/mips/ar7/platform.c .start = 112, start 186 arch/mips/ar7/platform.c .start = 0x10000000, start 213 arch/mips/ar7/platform.c .start = AR7_REGS_MAC0, start 219 arch/mips/ar7/platform.c .start = 27, start 228 arch/mips/ar7/platform.c .start = AR7_REGS_MAC1, start 234 arch/mips/ar7/platform.c .start = 41, start 310 arch/mips/ar7/platform.c .start = AR7_REGS_USB, start 316 arch/mips/ar7/platform.c .start = 32, start 322 arch/mips/ar7/platform.c .start = 0x03400000, start 536 arch/mips/ar7/platform.c .start = -1, /* Filled at runtime */ start 604 arch/mips/ar7/platform.c vlynq_low_res[0].start = TITAN_REGS_VLYNQ0; start 606 arch/mips/ar7/platform.c vlynq_low_res[1].start = 33; start 608 arch/mips/ar7/platform.c vlynq_low_res[2].start = 0x0c000000; start 610 arch/mips/ar7/platform.c vlynq_low_res[3].start = 80; start 614 arch/mips/ar7/platform.c vlynq_high_res[0].start = TITAN_REGS_VLYNQ1; start 616 arch/mips/ar7/platform.c vlynq_high_res[1].start = 34; start 618 arch/mips/ar7/platform.c vlynq_high_res[2].start = 0x40000000; start 620 arch/mips/ar7/platform.c vlynq_high_res[3].start = 112; start 630 arch/mips/ar7/platform.c cpmac_low_res[0].start = TITAN_REGS_MAC0; start 634 arch/mips/ar7/platform.c cpmac_high_res[0].start = 
TITAN_REGS_MAC1; start 710 arch/mips/ar7/platform.c ar7_wdt_res.start = UR8_REGS_WDT; start 712 arch/mips/ar7/platform.c ar7_wdt_res.start = AR7_REGS_WDT; start 714 arch/mips/ar7/platform.c ar7_wdt_res.end = ar7_wdt_res.start + 0x20; start 311 arch/mips/ath25/ar2315.c .start = AR2315_PCI_BASE, start 317 arch/mips/ath25/ar2315.c .start = AR2315_PCI_EXT_BASE, start 323 arch/mips/ath25/ar2315.c .start = AR2315_IRQ_LCBUS_PCI, start 170 arch/mips/ath25/ar5312.c .start = AR5312_FLASH_BASE, start 98 arch/mips/ath25/devices.c res->start = base; start 101 arch/mips/ath25/devices.c res->start = irq; start 48 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 53 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 58 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 77 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 82 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 86 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 90 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 111 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 116 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 120 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 124 arch/mips/bcm63xx/dev-enet.c .start = -1, /* filled at runtime */ start 180 arch/mips/bcm63xx/dev-enet.c shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); start 181 arch/mips/bcm63xx/dev-enet.c shared_res[0].end = shared_res[0].start; start 194 arch/mips/bcm63xx/dev-enet.c shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC); start 195 arch/mips/bcm63xx/dev-enet.c shared_res[1].end = shared_res[1].start; start 198 arch/mips/bcm63xx/dev-enet.c shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS); start 199 arch/mips/bcm63xx/dev-enet.c shared_res[2].end = shared_res[2].start; start 228 arch/mips/bcm63xx/dev-enet.c enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0); start 229 arch/mips/bcm63xx/dev-enet.c enet0_res[0].end = enet0_res[0].start; start 231 arch/mips/bcm63xx/dev-enet.c enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0); start 232 arch/mips/bcm63xx/dev-enet.c enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA); start 233 arch/mips/bcm63xx/dev-enet.c enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA); start 236 arch/mips/bcm63xx/dev-enet.c enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1); start 237 arch/mips/bcm63xx/dev-enet.c enet1_res[0].end = enet1_res[0].start; start 239 arch/mips/bcm63xx/dev-enet.c enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1); start 240 arch/mips/bcm63xx/dev-enet.c enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA); start 241 arch/mips/bcm63xx/dev-enet.c enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA); start 302 arch/mips/bcm63xx/dev-enet.c enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW); start 303 arch/mips/bcm63xx/dev-enet.c enetsw_res[0].end = enetsw_res[0].start; start 305 arch/mips/bcm63xx/dev-enet.c enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0); start 306 arch/mips/bcm63xx/dev-enet.c enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0); start 307 arch/mips/bcm63xx/dev-enet.c if (!enetsw_res[2].start) start 308 arch/mips/bcm63xx/dev-enet.c enetsw_res[2].start = -1; start 43 arch/mips/bcm63xx/dev-flash.c .start = 0, /* filled at runtime */ start 116 arch/mips/bcm63xx/dev-flash.c 
mtd_resources[0].start = val; start 19 arch/mips/bcm63xx/dev-hsspi.c .start = -1, /* filled at runtime */ start 24 arch/mips/bcm63xx/dev-hsspi.c .start = -1, /* filled at runtime */ start 41 arch/mips/bcm63xx/dev-hsspi.c spi_resources[0].start = bcm63xx_regset_address(RSET_HSSPI); start 42 arch/mips/bcm63xx/dev-hsspi.c spi_resources[0].end = spi_resources[0].start; start 44 arch/mips/bcm63xx/dev-hsspi.c spi_resources[1].start = bcm63xx_get_irq_number(IRQ_HSSPI); start 28 arch/mips/bcm63xx/dev-pcmcia.c .start = BCM_PCMCIA_COMMON_BASE_PA, start 33 arch/mips/bcm63xx/dev-pcmcia.c .start = BCM_PCMCIA_ATTR_BASE_PA, start 38 arch/mips/bcm63xx/dev-pcmcia.c .start = BCM_PCMCIA_IO_BASE_PA, start 51 arch/mips/bcm63xx/dev-pcmcia.c .start = BCM_PCMCIA_IO_BASE_PA, start 125 arch/mips/bcm63xx/dev-pcmcia.c pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA); start 126 arch/mips/bcm63xx/dev-pcmcia.c pcmcia_resources[0].end = pcmcia_resources[0].start + start 128 arch/mips/bcm63xx/dev-pcmcia.c pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA); start 16 arch/mips/bcm63xx/dev-rng.c .start = -1, /* filled at runtime */ start 34 arch/mips/bcm63xx/dev-rng.c rng_resources[0].start = bcm63xx_regset_address(RSET_RNG); start 35 arch/mips/bcm63xx/dev-rng.c rng_resources[0].end = rng_resources[0].start; start 23 arch/mips/bcm63xx/dev-spi.c .start = -1, /* filled at runtime */ start 28 arch/mips/bcm63xx/dev-spi.c .start = -1, /* filled at runtime */ start 44 arch/mips/bcm63xx/dev-spi.c spi_resources[0].start = bcm63xx_regset_address(RSET_SPI); start 45 arch/mips/bcm63xx/dev-spi.c spi_resources[0].end = spi_resources[0].start; start 46 arch/mips/bcm63xx/dev-spi.c spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); start 62 arch/mips/bcm63xx/dev-uart.c uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0); start 63 arch/mips/bcm63xx/dev-uart.c uart0_resources[0].end = uart0_resources[0].start + start 65 arch/mips/bcm63xx/dev-uart.c uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); start 69 arch/mips/bcm63xx/dev-uart.c uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1); start 70 arch/mips/bcm63xx/dev-uart.c uart1_resources[0].end = uart1_resources[0].start + start 72 arch/mips/bcm63xx/dev-uart.c uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1); start 47 arch/mips/bcm63xx/dev-usb-usbd.c usbd_resources[0].start = bcm63xx_regset_address(RSET_USBD); start 48 arch/mips/bcm63xx/dev-usb-usbd.c usbd_resources[0].end = usbd_resources[0].start + RSET_USBD_SIZE - 1; start 51 arch/mips/bcm63xx/dev-usb-usbd.c usbd_resources[1].start = bcm63xx_regset_address(RSET_USBDMA); start 52 arch/mips/bcm63xx/dev-usb-usbd.c usbd_resources[1].end = usbd_resources[1].start + RSET_USBDMA_SIZE - 1; start 58 arch/mips/bcm63xx/dev-usb-usbd.c r->start = r->end = bcm63xx_get_irq_number(irq_list[i]); start 16 arch/mips/bcm63xx/dev-wdt.c .start = -1, /* filled at runtime */ start 31 arch/mips/bcm63xx/dev-wdt.c wdt_resources[0].start = bcm63xx_regset_address(RSET_WDT); start 32 arch/mips/bcm63xx/dev-wdt.c wdt_resources[0].end = wdt_resources[0].start; start 156 arch/mips/bcm63xx/setup.c ioport_resource.start = 0; start 162 arch/mips/bmips/setup.c ioport_resource.start = 0; start 414 arch/mips/cavium-octeon/executive/cvmx-l2c.c int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len) start 419 arch/mips/cavium-octeon/executive/cvmx-l2c.c len += start & CVMX_CACHE_LINE_MASK; start 420 arch/mips/cavium-octeon/executive/cvmx-l2c.c start &= ~CVMX_CACHE_LINE_MASK; start 424 
arch/mips/cavium-octeon/executive/cvmx-l2c.c retval += cvmx_l2c_lock_line(start); start 425 arch/mips/cavium-octeon/executive/cvmx-l2c.c start += CVMX_CACHE_LINE_SIZE; start 506 arch/mips/cavium-octeon/executive/cvmx-l2c.c int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len) start 510 arch/mips/cavium-octeon/executive/cvmx-l2c.c len += start & CVMX_CACHE_LINE_MASK; start 511 arch/mips/cavium-octeon/executive/cvmx-l2c.c start &= ~CVMX_CACHE_LINE_MASK; start 514 arch/mips/cavium-octeon/executive/cvmx-l2c.c num_unlocked += cvmx_l2c_unlock_line(start); start 515 arch/mips/cavium-octeon/executive/cvmx-l2c.c start += CVMX_CACHE_LINE_SIZE; start 410 arch/mips/cavium-octeon/octeon-platform.c .start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS), start 414 arch/mips/cavium-octeon/octeon-platform.c .start = cvmx_build_io_address(8, 0), start 542 arch/mips/cavium-octeon/octeon-usb.c devm_release_mem_region(&pdev->dev, res->start, start 1257 arch/mips/cavium-octeon/setup.c ioport_resource.start = MAX_RESOURCE; start 13 arch/mips/cobalt/buttons.c .start = 0x1d000000, start 13 arch/mips/cobalt/lcd.c .start = 0x1f000000, start 15 arch/mips/cobalt/led.c .start = 0x1c000000, start 27 arch/mips/cobalt/mtd.c .start = 0x1fc00000, start 20 arch/mips/cobalt/pci.c .start = GT_DEF_PCI0_MEM0_BASE, start 27 arch/mips/cobalt/pci.c .start = 0x1000, start 15 arch/mips/cobalt/rtc.c .start = 0x70, start 20 arch/mips/cobalt/rtc.c .start = RTC_IRQ, start 18 arch/mips/cobalt/serial.c .start = 0x1c800000, start 23 arch/mips/cobalt/serial.c .start = SERIAL_IRQ, start 50 arch/mips/cobalt/setup.c .start = 0x00, start 56 arch/mips/cobalt/setup.c .start = 0x60, start 62 arch/mips/cobalt/setup.c .start = 0x80, start 68 arch/mips/cobalt/setup.c .start = 0xc0, start 17 arch/mips/cobalt/time.c u32 start, end; start 31 arch/mips/cobalt/time.c start = read_c0_count(); start 39 arch/mips/cobalt/time.c mips_hpt_frequency = (end - start) * 10; start 35 arch/mips/dec/platform.c dec_rtc_resources[0].start = RTC_PORT(0); start 167 arch/mips/dec/setup.c ioport_resource.start = ~0UL; start 130 arch/mips/dec/time.c u32 start, end; start 144 arch/mips/dec/time.c start = read_c0_count(); start 152 arch/mips/dec/time.c mips_hpt_frequency = (end - start) * 8; start 31 arch/mips/emma/markeins/platform.c .start = EMMA2RH_IRQ_PIIC0, start 36 arch/mips/emma/markeins/platform.c .start = EMMA2RH_PIIC0_BASE, start 45 arch/mips/emma/markeins/platform.c .start = EMMA2RH_IRQ_PIIC1, start 50 arch/mips/emma/markeins/platform.c .start = EMMA2RH_PIIC1_BASE, start 59 arch/mips/emma/markeins/platform.c .start = EMMA2RH_IRQ_PIIC2, start 64 arch/mips/emma/markeins/platform.c .start = EMMA2RH_PIIC2_BASE, start 171 arch/mips/emma/markeins/platform.c .start = 0x1e000000, start 96 arch/mips/emma/markeins/setup.c ioport_resource.start = EMMA2RH_PCI_IO_BASE; start 98 arch/mips/emma/markeins/setup.c iomem_resource.start = EMMA2RH_IO_BASE; start 127 arch/mips/fw/cfe/cfe_api.c cfe_enummem(int idx, int flags, u64 *start, u64 *length, u64 *type) start 143 arch/mips/fw/cfe/cfe_api.c *start = xiocb.plist.xiocb_meminfo.mi_addr; start 37 arch/mips/generic/board-ranchu.c unsigned int start, count; start 55 arch/mips/generic/board-ranchu.c start = read_c0_count(); start 62 arch/mips/generic/board-ranchu.c count = read_c0_count() - start; start 65 arch/mips/generic/yamon-dt.c *(mem_array++) = cpu_to_be32(mr->start); start 97 arch/mips/include/asm/bootinfo.h extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type); start 98 arch/mips/include/asm/bootinfo.h extern 
void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max); start 51 arch/mips/include/asm/cacheflush.h unsigned long start, unsigned long end); start 81 arch/mips/include/asm/cacheflush.h extern void (*flush_icache_range)(unsigned long start, unsigned long end); start 82 arch/mips/include/asm/cacheflush.h extern void (*local_flush_icache_range)(unsigned long start, unsigned long end); start 83 arch/mips/include/asm/cacheflush.h extern void (*__flush_icache_user_range)(unsigned long start, start 85 arch/mips/include/asm/cacheflush.h extern void (*__local_flush_icache_user_range)(unsigned long start, start 90 arch/mips/include/asm/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) start 98 arch/mips/include/asm/cacheflush.h static inline void flush_cache_vunmap(unsigned long start, unsigned long end) start 78 arch/mips/include/asm/fixmap.h extern void fixrange_init(unsigned long start, unsigned long end, start 88 arch/mips/include/asm/fw/cfe/cfe_api.h int cfe_enummem(int idx, int flags, uint64_t * start, uint64_t * length, start 615 arch/mips/include/asm/io.h extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); start 616 arch/mips/include/asm/io.h extern void (*_dma_cache_wback)(unsigned long start, unsigned long size); start 617 arch/mips/include/asm/io.h extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); start 619 arch/mips/include/asm/io.h #define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size) start 620 arch/mips/include/asm/io.h #define dma_cache_wback(start, size) _dma_cache_wback(start, size) start 621 arch/mips/include/asm/io.h #define dma_cache_inv(start, size) _dma_cache_inv(start, size) start 625 arch/mips/include/asm/io.h #define dma_cache_wback_inv(start,size) \ start 626 arch/mips/include/asm/io.h do { (void) (start); (void) (size); } while (0) start 627 arch/mips/include/asm/io.h #define dma_cache_wback(start,size) \ start 628 arch/mips/include/asm/io.h do { (void) (start); (void) (size); } while (0) start 629 arch/mips/include/asm/io.h #define dma_cache_inv(start,size) \ start 630 arch/mips/include/asm/io.h do { (void) (start); (void) (size); } while (0) start 942 arch/mips/include/asm/kvm_host.h unsigned long start, unsigned long end); start 944 arch/mips/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); start 125 arch/mips/include/asm/mach-au1x00/au1000_dma.h extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos, start 130 arch/mips/include/asm/mach-loongson64/boot_param.h u64 start; /* resource start address */ start 357 arch/mips/include/asm/netlogic/xlr/fmn.h extern int nlm_register_fmn_handler(int start, int end, start 264 arch/mips/include/asm/octeon/cvmx-l2c.h int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len); start 288 arch/mips/include/asm/octeon/cvmx-l2c.h int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len); start 186 arch/mips/include/asm/octeon/cvmx-lmcx-defs.h uint64_t start:1; start 188 arch/mips/include/asm/octeon/cvmx-lmcx-defs.h uint64_t start:1; start 717 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t start:33; start 721 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t start:33; start 642 arch/mips/include/asm/pgtable.h #define gup_fast_permitted(start, end) (!cpu_has_dc_aliases) start 533 arch/mips/include/asm/r4kcache.h unsigned long start = INDEX_BASE; \ start 534 arch/mips/include/asm/r4kcache.h unsigned long end = start + 
current_cpu_data.desc.waysize; \ start 541 arch/mips/include/asm/r4kcache.h for (addr = start; addr < end; addr += lsize * 32) \ start 547 arch/mips/include/asm/r4kcache.h unsigned long start = page; \ start 551 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32(start, hitop); \ start 552 arch/mips/include/asm/r4kcache.h start += lsize * 32; \ start 553 arch/mips/include/asm/r4kcache.h } while (start < end); \ start 559 arch/mips/include/asm/r4kcache.h unsigned long start = INDEX_BASE + (page & indexmask); \ start 560 arch/mips/include/asm/r4kcache.h unsigned long end = start + PAGE_SIZE; \ start 567 arch/mips/include/asm/r4kcache.h for (addr = start; addr < end; addr += lsize * 32) \ start 595 arch/mips/include/asm/r4kcache.h unsigned long start = page; \ start 599 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32_user(start, hitop); \ start 600 arch/mips/include/asm/r4kcache.h start += lsize * 32; \ start 601 arch/mips/include/asm/r4kcache.h } while (start < end); \ start 616 arch/mips/include/asm/r4kcache.h static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \ start 620 arch/mips/include/asm/r4kcache.h unsigned long addr = start & ~(lsize - 1); \ start 639 arch/mips/include/asm/r4kcache.h static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ start 643 arch/mips/include/asm/r4kcache.h unsigned long addr = start & ~(lsize - 1); \ start 682 arch/mips/include/asm/r4kcache.h unsigned long start = CAC_BASE | nid_to_addrbase(node); \ start 683 arch/mips/include/asm/r4kcache.h unsigned long end = start + current_cpu_data.desc.waysize; \ start 690 arch/mips/include/asm/r4kcache.h for (addr = start; addr < end; addr += lsize * 32) \ start 889 arch/mips/include/asm/sn/klconfig.h extern lboard_t *find_lboard(lboard_t *start, unsigned char type); start 893 arch/mips/include/asm/sn/klconfig.h extern lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_class); start 18 arch/mips/include/asm/tlbflush.h unsigned long start, unsigned long end); start 19 arch/mips/include/asm/tlbflush.h extern void local_flush_tlb_kernel_range(unsigned long start, start 15 arch/mips/include/asm/txx9/generic.h #define TXX9_CE(n) (unsigned long)(txx9_ce_res[(n)].start) start 94 arch/mips/include/asm/vpe.h void (*start)(int vpe); start 19 arch/mips/include/asm/yamon-dt.h phys_addr_t start; start 40 arch/mips/jazz/reset.c unsigned long start = jiffies; start 41 arch/mips/jazz/reset.c unsigned long timeout = start + HZ/2; start 33 arch/mips/jazz/setup.c .start = 0x00, start 38 arch/mips/jazz/setup.c .start = 0x40, start 43 arch/mips/jazz/setup.c .start = 0x80, start 48 arch/mips/jazz/setup.c .start = 0xc0, start 124 arch/mips/jazz/setup.c .start = JAZZ_SCSI_BASE, start 129 arch/mips/jazz/setup.c .start = JAZZ_SCSI_DMA, start 134 arch/mips/jazz/setup.c .start = JAZZ_SCSI_IRQ, start 154 arch/mips/jazz/setup.c .start = JAZZ_ETHERNET_BASE, start 159 arch/mips/jazz/setup.c .start = JAZZ_ETHERNET_IRQ, start 179 arch/mips/jazz/setup.c .start = 0x70, start 184 arch/mips/jazz/setup.c .start = 8, start 36 arch/mips/kernel/csrc-ioasic.c u32 start, end; start 43 arch/mips/kernel/csrc-ioasic.c start = dec_ioasic_hpt_read(&clocksource_dec); start 51 arch/mips/kernel/csrc-ioasic.c freq = (end - start) * 8; start 39 arch/mips/kernel/machine_kexec.c pr_debug(" start: %lx\n", kimage->start); start 218 arch/mips/kernel/machine_kexec.c (unsigned long) phys_to_virt(image->start); start 252 arch/mips/kernel/machine_kexec.c printk("Will call new kernel at %08lx\n", 
image->start); start 31 arch/mips/kernel/mips-cpc.c return res.start; start 652 arch/mips/kernel/perf_event_mipsxx.c .start = mipspmu_start, start 185 arch/mips/kernel/proc.c .start = c_start, start 232 arch/mips/kernel/ptrace.c unsigned start, num_regs, i; start 235 arch/mips/kernel/ptrace.c start = pos / sizeof(u32); start 238 arch/mips/kernel/ptrace.c if (start + num_regs > ELF_NGREG) start 246 arch/mips/kernel/ptrace.c for (i = start; i < num_regs; i++) { start 299 arch/mips/kernel/ptrace.c unsigned start, num_regs, i; start 302 arch/mips/kernel/ptrace.c start = pos / sizeof(u64); start 305 arch/mips/kernel/ptrace.c if (start + num_regs > ELF_NGREG) start 313 arch/mips/kernel/ptrace.c for (i = start; i < num_regs; i++) { start 640 arch/mips/kernel/ptrace.c int i, j, start, start_pad, err; start 648 arch/mips/kernel/ptrace.c i = start = err = 0; start 649 arch/mips/kernel/ptrace.c for (; i < NUM_FPU_REGS; i++, start += regset->size) { start 652 arch/mips/kernel/ptrace.c start, start + cp_sz); start 654 arch/mips/kernel/ptrace.c start_pad = start + cp_sz; start 714 arch/mips/kernel/ptrace.c int i, err, start; start 728 arch/mips/kernel/ptrace.c i = start = err = 0; start 729 arch/mips/kernel/ptrace.c for (; i < NUM_FPU_REGS; i++, start += regset->size) { start 732 arch/mips/kernel/ptrace.c start, start + cp_sz); start 759 arch/mips/kernel/ptrace.c unsigned int start, num_regs, i; start 767 arch/mips/kernel/ptrace.c start = pos / sizeof(u32); start 770 arch/mips/kernel/ptrace.c if (start + num_regs > NUM_DSP_REGS + 1) start 773 arch/mips/kernel/ptrace.c for (i = start; i < num_regs; i++) start 794 arch/mips/kernel/ptrace.c unsigned int start, num_regs, i; start 803 arch/mips/kernel/ptrace.c start = pos / sizeof(u32); start 806 arch/mips/kernel/ptrace.c if (start + num_regs > NUM_DSP_REGS + 1) start 814 arch/mips/kernel/ptrace.c for (i = start; i < num_regs; i++) start 839 arch/mips/kernel/ptrace.c unsigned int start, num_regs, i; start 847 arch/mips/kernel/ptrace.c start = pos / sizeof(u64); start 850 arch/mips/kernel/ptrace.c if (start + num_regs > NUM_DSP_REGS + 1) start 853 arch/mips/kernel/ptrace.c for (i = start; i < num_regs; i++) start 874 arch/mips/kernel/ptrace.c unsigned int start, num_regs, i; start 883 arch/mips/kernel/ptrace.c start = pos / sizeof(u64); start 886 arch/mips/kernel/ptrace.c if (start + num_regs > NUM_DSP_REGS + 1) start 894 arch/mips/kernel/ptrace.c for (i = start; i < num_regs; i++) start 89 arch/mips/kernel/rtlx-cmp.c rtlx_notify.start = rtlx_starting; start 115 arch/mips/kernel/rtlx-mt.c rtlx_notify.start = rtlx_starting; start 91 arch/mips/kernel/setup.c void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) start 102 arch/mips/kernel/setup.c if (start + size - 1 == PHYS_ADDR_MAX) start 106 arch/mips/kernel/setup.c if (start + size < start) { start 111 arch/mips/kernel/setup.c if (start < PHYS_OFFSET) start 114 arch/mips/kernel/setup.c memblock_add(start, size); start 121 arch/mips/kernel/setup.c memblock_remove(start, size); start 125 arch/mips/kernel/setup.c memblock_reserve(start, size); start 130 arch/mips/kernel/setup.c void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) start 142 arch/mips/kernel/setup.c (unsigned long long) start, start 146 arch/mips/kernel/setup.c add_memory_region(start, size, BOOT_MEM_RAM); start 156 arch/mips/kernel/setup.c unsigned long start = memparse(p, &p); start 160 arch/mips/kernel/setup.c if (start < XKPHYS) start 161 arch/mips/kernel/setup.c start = 
(int)start; start 163 arch/mips/kernel/setup.c initrd_start = start; start 164 arch/mips/kernel/setup.c initrd_end += start; start 339 arch/mips/kernel/setup.c unsigned long start = memblock_region_memory_base_pfn(mem); start 350 arch/mips/kernel/setup.c if (start >= PFN_DOWN(HIGHMEM_START)) start 391 arch/mips/kernel/setup.c phys_addr_t start, size; start 403 arch/mips/kernel/setup.c start = 0; start 406 arch/mips/kernel/setup.c start = memparse(p + 1, &p); start 408 arch/mips/kernel/setup.c add_memory_region(start, size, BOOT_MEM_RAM); start 463 arch/mips/kernel/setup.c unsigned long start = mem->base; start 464 arch/mips/kernel/setup.c unsigned long end = start + mem->size; start 465 arch/mips/kernel/setup.c if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { start 502 arch/mips/kernel/setup.c crashk_res.start = crash_base; start 510 arch/mips/kernel/setup.c if (crashk_res.start == crashk_res.end) start 517 arch/mips/kernel/setup.c crashk_res.start + 1) >> 20), start 518 arch/mips/kernel/setup.c (unsigned long)(crashk_res.start >> 20)); start 532 arch/mips/kernel/setup.c phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text))); start 533 arch/mips/kernel/setup.c phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start; start 535 arch/mips/kernel/setup.c if (!memblock_is_region_memory(start, size)) { start 537 arch/mips/kernel/setup.c memblock_add(start, size); start 651 arch/mips/kernel/setup.c if (crashk_res.start != crashk_res.end) start 652 arch/mips/kernel/setup.c memblock_reserve(crashk_res.start, start 653 arch/mips/kernel/setup.c crashk_res.end - crashk_res.start + 1); start 679 arch/mips/kernel/setup.c code_resource.start = __pa_symbol(&_text); start 681 arch/mips/kernel/setup.c data_resource.start = __pa_symbol(&_etext); start 683 arch/mips/kernel/setup.c bss_resource.start = __pa_symbol(&__bss_start); start 687 arch/mips/kernel/setup.c phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region)); start 696 arch/mips/kernel/setup.c res->start = start; start 457 arch/mips/kernel/smp-bmips.c static void bmips_wr_vec(unsigned long dst, char *start, char *end) start 459 arch/mips/kernel/smp-bmips.c memcpy((void *)dst, start, end - start); start 460 arch/mips/kernel/smp-bmips.c dma_cache_wback(dst, end - start); start 461 arch/mips/kernel/smp-bmips.c local_flush_icache_range(dst, dst + (end - start)); start 566 arch/mips/kernel/smp.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 578 arch/mips/kernel/smp.c addr = round_down(start, PAGE_SIZE * 2); start 591 arch/mips/kernel/smp.c .addr1 = start, start 596 arch/mips/kernel/smp.c local_flush_tlb_range(vma, start, end); start 611 arch/mips/kernel/smp.c local_flush_tlb_range(vma, start, end); start 623 arch/mips/kernel/smp.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 626 arch/mips/kernel/smp.c .addr1 = start, start 13 arch/mips/kernel/spinlock_test.c ktime_t start, finish; start 21 arch/mips/kernel/spinlock_test.c start = ktime_get(); start 33 arch/mips/kernel/spinlock_test.c *val = ktime_us_delta(finish, start); start 52 arch/mips/kernel/spinlock_test.c ktime_t start; start 70 arch/mips/kernel/spinlock_test.c pt->start = ktime_get(); start 112 arch/mips/kernel/spinlock_test.c *val = ktime_us_delta(finish, t1.start); start 142 arch/mips/kernel/vpe-mt.c notifier->start(VPE_MODULE_MINOR); start 196 arch/mips/kernel/vpe-mt.c int vpe_start(void *vpe, unsigned long start) start 200 arch/mips/kernel/vpe-mt.c v->__start = start; start 266 
arch/mips/kvm/mips.c static inline void dump_handler(const char *symbol, void *start, void *end) start 275 arch/mips/kvm/mips.c for (p = start; p < (u32 *)end; ++p) start 307 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \ start 311 arch/mips/kvm/mmu.c int i_min = __pte_offset(start); \ start 331 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \ start 337 arch/mips/kvm/mmu.c int i_min = __pmd_offset(start); \ start 341 arch/mips/kvm/mmu.c for (i = i_min; i <= i_max; ++i, start = 0) { \ start 349 arch/mips/kvm/mmu.c ret |= kvm_mips_##name##_pte(pte, start, cur_end); \ start 354 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \ start 360 arch/mips/kvm/mmu.c int i_min = __pud_offset(start); \ start 364 arch/mips/kvm/mmu.c for (i = i_min; i <= i_max; ++i, start = 0) { \ start 372 arch/mips/kvm/mmu.c ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \ start 377 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \ start 383 arch/mips/kvm/mmu.c int i_min = pgd_index(start); \ start 387 arch/mips/kvm/mmu.c for (i = i_min; i <= i_max; ++i, start = 0) { \ start 395 arch/mips/kvm/mmu.c ret |= kvm_mips_##name##_pud(pud, start, cur_end); \ start 446 arch/mips/kvm/mmu.c gfn_t start = base_gfn + __ffs(mask); start 449 arch/mips/kvm/mmu.c kvm_mips_mkclean_gpa_pt(kvm, start, end); start 469 arch/mips/kvm/mmu.c unsigned long start, start 488 arch/mips/kvm/mmu.c hva_start = max(start, memslot->userspace_addr); start 515 arch/mips/kvm/mmu.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) start 517 arch/mips/kvm/mmu.c handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); start 582 arch/mips/kvm/mmu.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) start 584 arch/mips/kvm/mmu.c return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); start 209 arch/mips/lantiq/falcon/sysctrl.c if ((request_mem_region(res_status.start, resource_size(&res_status), start 211 arch/mips/lantiq/falcon/sysctrl.c (request_mem_region(res_ebu.start, resource_size(&res_ebu), start 213 arch/mips/lantiq/falcon/sysctrl.c (request_mem_region(res_sys[0].start, start 216 arch/mips/lantiq/falcon/sysctrl.c (request_mem_region(res_sys[1].start, start 219 arch/mips/lantiq/falcon/sysctrl.c (request_mem_region(res_sys[2].start, start 224 arch/mips/lantiq/falcon/sysctrl.c status_membase = ioremap_nocache(res_status.start, start 226 arch/mips/lantiq/falcon/sysctrl.c ltq_ebu_membase = ioremap_nocache(res_ebu.start, start 233 arch/mips/lantiq/falcon/sysctrl.c sysctl_membase[i] = ioremap_nocache(res_sys[i].start, start 348 arch/mips/lantiq/irq.c if (!request_mem_region(res.start, resource_size(&res), start 352 arch/mips/lantiq/irq.c ltq_icu_membase[vpe] = ioremap_nocache(res.start, start 401 arch/mips/lantiq/irq.c if (!request_mem_region(res.start, resource_size(&res), start 405 arch/mips/lantiq/irq.c ltq_eiu_membase = ioremap_nocache(res.start, start 73 arch/mips/lantiq/prom.c ioport_resource.start = IOPORT_RESOURCE_START; start 75 arch/mips/lantiq/prom.c iomem_resource.start = IOMEM_RESOURCE_START; start 75 arch/mips/lantiq/xway/gptu.c int timer = irq - irqres[0].start; start 96 arch/mips/lantiq/xway/gptu.c int ret = request_irq(irqres[clk->bits].start, timer_irq_handler, start 117 arch/mips/lantiq/xway/gptu.c free_irq(irqres[clk->bits].start, NULL); start 426 arch/mips/lantiq/xway/sysctrl.c if 
(!request_mem_region(res_pmu.start, resource_size(&res_pmu), start 428 arch/mips/lantiq/xway/sysctrl.c !request_mem_region(res_cgu.start, resource_size(&res_cgu), start 430 arch/mips/lantiq/xway/sysctrl.c !request_mem_region(res_ebu.start, resource_size(&res_ebu), start 434 arch/mips/lantiq/xway/sysctrl.c pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu)); start 435 arch/mips/lantiq/xway/sysctrl.c ltq_cgu_membase = ioremap_nocache(res_cgu.start, start 437 arch/mips/lantiq/xway/sysctrl.c ltq_ebu_membase = ioremap_nocache(res_ebu.start, start 113 arch/mips/lasat/prom.c ioport_resource.start = 0; start 37 arch/mips/lasat/serial.c lasat_serial_res[0].start = KSEG1ADDR(LASAT_UART_REGS_BASE_100); start 38 arch/mips/lasat/serial.c lasat_serial_res[0].end = lasat_serial_res[0].start + LASAT_UART_REGS_SHIFT_100 * 8 - 1; start 40 arch/mips/lasat/serial.c lasat_serial_res[1].start = LASATINT_UART_100; start 49 arch/mips/lasat/serial.c lasat_serial_res[0].start = KSEG1ADDR(LASAT_UART_REGS_BASE_200); start 50 arch/mips/lasat/serial.c lasat_serial_res[0].end = lasat_serial_res[0].start + LASAT_UART_REGS_SHIFT_200 * 8 - 1; start 52 arch/mips/lasat/serial.c lasat_serial_res[1].start = LASATINT_UART_200; start 164 arch/mips/loongson32/common/platform.c .start = LS1X_GMAC0_BASE, start 170 arch/mips/loongson32/common/platform.c .start = LS1X_GMAC0_IRQ, start 201 arch/mips/loongson32/common/platform.c .start = LS1X_GMAC1_BASE, start 207 arch/mips/loongson32/common/platform.c .start = LS1X_GMAC1_IRQ, start 226 arch/mips/loongson32/common/platform.c .start = LS1X_GPIO0_BASE, start 241 arch/mips/loongson32/common/platform.c .start = LS1X_GPIO1_BASE, start 259 arch/mips/loongson32/common/platform.c .start = LS1X_EHCI_BASE, start 264 arch/mips/loongson32/common/platform.c .start = LS1X_EHCI_IRQ, start 300 arch/mips/loongson32/common/platform.c .start = LS1X_WDT_BASE, start 140 arch/mips/loongson64/common/mem.c if (!r->start && r->end) start 145 arch/mips/loongson64/common/mem.c uca_start = r->start; start 14 arch/mips/loongson64/common/pci.c .start = LOONGSON_PCI_MEM_START, start 21 arch/mips/loongson64/common/pci.c .start = LOONGSON_PCI_IO_START, start 85 arch/mips/loongson64/common/pci.c loongson_pci_mem_resource.start = loongson_sysconf.pci_mem_start_addr; start 15 arch/mips/loongson64/common/rtc.c .start = RTC_PORT(0), start 19 arch/mips/loongson64/common/rtc.c .start = RTC_IRQ, start 53 arch/mips/mm/c-octeon.c static void local_octeon_flush_icache_range(unsigned long start, start 124 arch/mips/mm/c-octeon.c static void octeon_flush_icache_range(unsigned long start, unsigned long end) start 138 arch/mips/mm/c-octeon.c unsigned long start, unsigned long end) start 104 arch/mips/mm/c-r3k.c static void r3k_flush_icache_range(unsigned long start, unsigned long end) start 109 arch/mips/mm/c-r3k.c size = end - start; start 110 arch/mips/mm/c-r3k.c if (size > icache_size || KSEGX(start) != KSEG0) { start 111 arch/mips/mm/c-r3k.c start = KSEG0; start 114 arch/mips/mm/c-r3k.c p = (char *)start; start 161 arch/mips/mm/c-r3k.c static void r3k_flush_dcache_range(unsigned long start, unsigned long end) start 166 arch/mips/mm/c-r3k.c size = end - start; start 167 arch/mips/mm/c-r3k.c if (size > dcache_size || KSEGX(start) != KSEG0) { start 168 arch/mips/mm/c-r3k.c start = KSEG0; start 171 arch/mips/mm/c-r3k.c p = (char *)start; start 233 arch/mips/mm/c-r3k.c unsigned long start, unsigned long end) start 282 arch/mips/mm/c-r3k.c static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size) 
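Aside on the range-flush entries collected here: r3k_flush_dcache_range and r3k_dma_cache_wback_inv above, the blast_*_range macros in r4kcache.h, and the nds32 cpu_dcache_*_range routines later in this listing all follow the same shape: round start down to a cache-line boundary, then walk [start, end) one line at a time issuing a per-line cache operation. The following is a minimal standalone sketch of that loop, not taken from any of the files indexed here; flush_one_line() and CACHE_LINE_SIZE are invented stand-ins for the architecture's per-line cache op and line size.

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_LINE_SIZE 32UL  /* assumed line size, for illustration only */

    /* Hypothetical stand-in for a per-line cache op (CACHE/cctl instruction). */
    static void flush_one_line(uintptr_t addr)
    {
            printf("flush line at %#lx\n", (unsigned long)addr);
    }

    /* Walk [start, end) one cache line at a time, as the listed range routines do. */
    static void flush_range(uintptr_t start, uintptr_t end)
    {
            uintptr_t addr = start & ~(CACHE_LINE_SIZE - 1);  /* align down to a line */

            while (addr < end) {
                    flush_one_line(addr);
                    addr += CACHE_LINE_SIZE;
            }
    }

    int main(void)
    {
            /* The unaligned start still gets its (partially touched) first line flushed. */
            flush_range(0x1000 + 5, 0x1000 + 100);
            return 0;
    }

Aligning start down rather than up is what guarantees the partially covered first line is included, which is why almost every *_range routine in this index masks start with ~(line_size - 1) before looping.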
start 288 arch/mips/mm/c-r3k.c r3k_flush_dcache_range(start, start + size); start 263 arch/mips/mm/c-r4k.c unsigned long start = INDEX_BASE; start 264 arch/mips/mm/c-r4k.c unsigned long end = start + current_cpu_data.icache.waysize; start 273 arch/mips/mm/c-r4k.c for (addr = start + 0x400; addr < end; addr += 0x400 * 2) start 278 arch/mips/mm/c-r4k.c for (addr = start; addr < end; addr += 0x400 * 2) start 294 arch/mips/mm/c-r4k.c unsigned long start = INDEX_BASE + (page & indexmask); start 295 arch/mips/mm/c-r4k.c unsigned long end = start + PAGE_SIZE; start 304 arch/mips/mm/c-r4k.c for (addr = start + 0x400; addr < end; addr += 0x400 * 2) start 309 arch/mips/mm/c-r4k.c for (addr = start; addr < end; addr += 0x400 * 2) start 597 arch/mips/mm/c-r4k.c unsigned long start, unsigned long end) start 744 arch/mips/mm/c-r4k.c unsigned long start; start 750 arch/mips/mm/c-r4k.c static inline void __local_r4k_flush_icache_range(unsigned long start, start 757 arch/mips/mm/c-r4k.c (type & R4K_INDEX && end - start >= dcache_size)) { start 762 arch/mips/mm/c-r4k.c protected_blast_dcache_range(start, end); start 764 arch/mips/mm/c-r4k.c blast_dcache_range(start, end); start 769 arch/mips/mm/c-r4k.c (type & R4K_INDEX && end - start > icache_size)) start 774 arch/mips/mm/c-r4k.c protected_loongson2_blast_icache_range(start, end); start 779 arch/mips/mm/c-r4k.c protected_blast_icache_range(start, end); start 781 arch/mips/mm/c-r4k.c blast_icache_range(start, end); start 787 arch/mips/mm/c-r4k.c static inline void local_r4k_flush_icache_range(unsigned long start, start 790 arch/mips/mm/c-r4k.c __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false); start 793 arch/mips/mm/c-r4k.c static inline void local_r4k_flush_icache_user_range(unsigned long start, start 796 arch/mips/mm/c-r4k.c __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true); start 802 arch/mips/mm/c-r4k.c unsigned long start = fir_args->start; start 807 arch/mips/mm/c-r4k.c __local_r4k_flush_icache_range(start, end, type, user); start 810 arch/mips/mm/c-r4k.c static void __r4k_flush_icache_range(unsigned long start, unsigned long end, start 816 arch/mips/mm/c-r4k.c args.start = start; start 831 arch/mips/mm/c-r4k.c size = end - start; start 845 arch/mips/mm/c-r4k.c static void r4k_flush_icache_range(unsigned long start, unsigned long end) start 847 arch/mips/mm/c-r4k.c return __r4k_flush_icache_range(start, end, false); start 850 arch/mips/mm/c-r4k.c static void r4k_flush_icache_user_range(unsigned long start, unsigned long end) start 852 arch/mips/mm/c-r4k.c return __r4k_flush_icache_range(start, end, true); start 158 arch/mips/mm/c-tx39.c unsigned long start, unsigned long end) start 232 arch/mips/mm/c-tx39.c static void tx39_flush_icache_range(unsigned long start, unsigned long end) start 234 arch/mips/mm/c-tx39.c if (end - start > dcache_size) start 237 arch/mips/mm/c-tx39.c protected_blast_dcache_range(start, end); start 239 arch/mips/mm/c-tx39.c if (end - start > icache_size) start 248 arch/mips/mm/c-tx39.c protected_blast_icache_range(start, end); start 30 arch/mips/mm/cache.c void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start, start 34 arch/mips/mm/cache.c void (*flush_icache_range)(unsigned long start, unsigned long end); start 36 arch/mips/mm/cache.c void (*local_flush_icache_range)(unsigned long start, unsigned long end); start 38 arch/mips/mm/cache.c void (*__flush_icache_user_range)(unsigned long start, unsigned long end); start 40 arch/mips/mm/cache.c void 
(*__local_flush_icache_user_range)(unsigned long start, unsigned long end); start 61 arch/mips/mm/cache.c void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); start 62 arch/mips/mm/cache.c void (*_dma_cache_wback)(unsigned long start, unsigned long size); start 63 arch/mips/mm/cache.c void (*_dma_cache_inv)(unsigned long start, unsigned long size); start 230 arch/mips/mm/init.c void __init fixrange_init(unsigned long start, unsigned long end, start 241 arch/mips/mm/init.c vaddr = start; start 27 arch/mips/mm/sc-r5k.c unsigned long start = INDEX_BASE; start 28 arch/mips/mm/sc-r5k.c unsigned long end = start + scache_size; start 30 arch/mips/mm/sc-r5k.c while(start < end) { start 31 arch/mips/mm/sc-r5k.c cache_op(R5K_Page_Invalidate_S, start); start 32 arch/mips/mm/sc-r5k.c start += SC_PAGE; start 93 arch/mips/mm/sc-rm7k.c unsigned long start = CKSEG0ADDR(0); start 94 arch/mips/mm/sc-rm7k.c unsigned long end = start + tcache_size; start 98 arch/mips/mm/sc-rm7k.c while (start < end) { start 99 arch/mips/mm/sc-rm7k.c cache_op(Page_Invalidate_T, start); start 100 arch/mips/mm/sc-rm7k.c start += tc_pagesize; start 70 arch/mips/mm/tlb-r3k.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 82 arch/mips/mm/tlb-r3k.c cpu_context(cpu, mm) & asid_mask, start, end); start 85 arch/mips/mm/tlb-r3k.c size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; start 90 arch/mips/mm/tlb-r3k.c start &= PAGE_MASK; start 93 arch/mips/mm/tlb-r3k.c while (start < end) { start 96 arch/mips/mm/tlb-r3k.c write_c0_entryhi(start | newpid); start 97 arch/mips/mm/tlb-r3k.c start += PAGE_SIZE; /* BARRIER */ start 114 arch/mips/mm/tlb-r3k.c void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 119 arch/mips/mm/tlb-r3k.c printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end); start 122 arch/mips/mm/tlb-r3k.c size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; start 126 arch/mips/mm/tlb-r3k.c start &= PAGE_MASK; start 130 arch/mips/mm/tlb-r3k.c while (start < end) { start 133 arch/mips/mm/tlb-r3k.c write_c0_entryhi(start); start 134 arch/mips/mm/tlb-r3k.c start += PAGE_SIZE; /* BARRIER */ start 107 arch/mips/mm/tlb-r4k.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 117 arch/mips/mm/tlb-r4k.c start = round_down(start, PAGE_SIZE << 1); start 119 arch/mips/mm/tlb-r4k.c size = (end - start) >> (PAGE_SHIFT + 1); start 133 arch/mips/mm/tlb-r4k.c while (start < end) { start 137 arch/mips/mm/tlb-r4k.c write_c0_entryhi(start); start 139 arch/mips/mm/tlb-r4k.c write_c0_entryhi(start | newpid); start 140 arch/mips/mm/tlb-r4k.c start += (PAGE_SIZE << 1); start 167 arch/mips/mm/tlb-r4k.c void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 172 arch/mips/mm/tlb-r4k.c size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; start 179 arch/mips/mm/tlb-r4k.c start &= (PAGE_MASK << 1); start 184 arch/mips/mm/tlb-r4k.c while (start < end) { start 187 arch/mips/mm/tlb-r4k.c write_c0_entryhi(start); start 188 arch/mips/mm/tlb-r4k.c start += (PAGE_SIZE << 1); start 257 arch/mips/mm/tlbex.c static inline void dump_handler(const char *symbol, const void *start, const void *end) start 259 arch/mips/mm/tlbex.c unsigned int count = (end - start) / sizeof(u32); start 260 arch/mips/mm/tlbex.c const u32 *handler = start; start 84 arch/mips/mti-malta/malta-amon.c n->start(VPE_MODULE_MINOR); start 172 arch/mips/mti-malta/malta-init.c u32 start, map, mask, data; start 188 arch/mips/mti-malta/malta-init.c start = 
GT_READ(GT_PCI0IOLD_OFS); start 190 arch/mips/mti-malta/malta-init.c if ((start & map) != 0) { start 191 arch/mips/mti-malta/malta-init.c map &= ~start; start 37 arch/mips/mti-malta/malta-setup.c .start = 0x00, start 43 arch/mips/mti-malta/malta-setup.c .start = 0x40, start 49 arch/mips/mti-malta/malta-setup.c .start = 0x60, start 55 arch/mips/mti-malta/malta-setup.c .start = 0x80, start 61 arch/mips/mti-malta/malta-setup.c .start = 0xc0, start 64 arch/mips/mti-malta/malta-time.c unsigned int count, start; start 85 arch/mips/mti-malta/malta-time.c start = read_c0_count(); start 115 arch/mips/mti-malta/malta-time.c count -= start; start 299 arch/mips/netlogic/common/irq.c bus = (res.start >> 20) & 0xf; start 312 arch/mips/netlogic/common/irq.c socid = (res.start >> 18) & 0x3; start 150 arch/mips/netlogic/xlr/platform-flash.c res->start = flash_map_base + ((unsigned long)base << 16); start 151 arch/mips/netlogic/xlr/platform-flash.c res->end = res->start + (mask + 1) * 64 * 1024; start 121 arch/mips/netlogic/xlr/platform.c .start = irq, \ start 181 arch/mips/netlogic/xlr/platform.c xls_usb_ehci_device.resource[0].start = memres; start 186 arch/mips/netlogic/xlr/platform.c xls_usb_ohci_device_0.resource[0].start = memres; start 191 arch/mips/netlogic/xlr/platform.c xls_usb_ohci_device_1.resource[0].start = memres; start 217 arch/mips/netlogic/xlr/platform.c .start = 0, /* filled at init */ start 237 arch/mips/netlogic/xlr/platform.c nlm_xlr_i2c_1.resource[0].start = CPHYSADDR(nlm_mmio_base(offset)); start 238 arch/mips/netlogic/xlr/platform.c nlm_xlr_i2c_1.resource[0].end = nlm_xlr_i2c_1.resource[0].start + 0xfff; start 146 arch/mips/netlogic/xlr/setup.c u64 start, size; start 154 arch/mips/netlogic/xlr/setup.c start = bootm->map[i].addr; start 158 arch/mips/netlogic/xlr/setup.c if (i == 0 && start == 0 && size == 0x0c000000) start 161 arch/mips/netlogic/xlr/setup.c add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); start 133 arch/mips/oprofile/common.c ops->start = op_mips_start; start 65 arch/mips/pci/fixup-emma2rh.c dev->resource[i].start = 0; start 45 arch/mips/pci/fixup-loongson3.c if (res->start) start 55 arch/mips/pci/fixup-loongson3.c res->start = virt_to_phys((void *) loongson_sysconf.vgabios_addr); start 56 arch/mips/pci/fixup-loongson3.c res->end = res->start + 256*1024 - 1; start 33 arch/mips/pci/fixup-sb1250.c unsigned char start; start 43 arch/mips/pci/fixup-sb1250.c exclude_this = exclude->set && (dev->bus->number >= exclude->start && start 51 arch/mips/pci/fixup-sb1250.c exclude->start = dev->subordinate->number; start 55 arch/mips/pci/fixup-sb1250.c exclude->start, exclude->end); start 208 arch/mips/pci/ops-pmcmsp.c .start = 0x04, start 243 arch/mips/pci/ops-pmcmsp.c .start = MSP_PCI_SPACE_BASE, start 134 arch/mips/pci/ops-tx3927.c channel->io_resource->start + mips_io_port_base - IO_BASE; start 136 arch/mips/pci/ops-tx3927.c channel->io_resource->end - channel->io_resource->start; start 138 arch/mips/pci/ops-tx3927.c channel->io_resource->start - channel->io_offset; start 140 arch/mips/pci/ops-tx3927.c channel->mem_resource->start; start 142 arch/mips/pci/ops-tx3927.c channel->mem_resource->end - channel->mem_resource->start; start 144 arch/mips/pci/ops-tx3927.c channel->mem_resource->start - channel->mem_offset; start 247 arch/mips/pci/ops-tx4927.c __raw_writel((channel->io_resource->end - channel->io_resource->start) start 250 arch/mips/pci/ops-tx4927.c ____raw_writeq((channel->io_resource->start + start 258 arch/mips/pci/ops-tx4927.c 
____raw_writeq(channel->io_resource->start - channel->io_offset, start 267 arch/mips/pci/ops-tx4927.c - channel->mem_resource->start) >> 4, start 269 arch/mips/pci/ops-tx4927.c ____raw_writeq(channel->mem_resource->start | start 276 arch/mips/pci/ops-tx4927.c ____raw_writeq(channel->mem_resource->start - start 59 arch/mips/pci/pci-alchemy.c .start = ALCHEMY_PCI_MEMWIN_START, start 66 arch/mips/pci/pci-alchemy.c .start = ALCHEMY_PCI_IOWIN_START, start 393 arch/mips/pci/pci-alchemy.c if (!request_mem_region(r->start, resource_size(r), pdev->name)) { start 412 arch/mips/pci/pci-alchemy.c ctx->regs = ioremap_nocache(r->start, resource_size(r)); start 499 arch/mips/pci/pci-alchemy.c release_mem_region(r->start, resource_size(r)); start 439 arch/mips/pci/pci-ar2315.c apc->mem_res.start = res->start; start 444 arch/mips/pci/pci-ar2315.c apc->cfg_mem = devm_ioremap_nocache(dev, res->start, start 485 arch/mips/pci/pci-ar2315.c apc->io_res.start = 0; start 354 arch/mips/pci/pci-ar71xx.c apc->io_res.start = res->start; start 364 arch/mips/pci/pci-ar71xx.c apc->mem_res.start = res->start; start 400 arch/mips/pci/pci-ar724x.c apc->io_res.start = res->start; start 410 arch/mips/pci/pci-ar724x.c apc->mem_res.start = res->start; start 169 arch/mips/pci/pci-bcm1480.c .start = A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES, start 176 arch/mips/pci/pci-bcm1480.c .start = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, start 163 arch/mips/pci/pci-bcm1480ht.c .start = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES, start 170 arch/mips/pci/pci-bcm1480ht.c .start = A_BCM1480_PHYS_HT_IO_MATCH_BYTES, start 29 arch/mips/pci/pci-bcm63xx.c .start = BCM_PCI_MEM_BASE_PA, start 36 arch/mips/pci/pci-bcm63xx.c .start = BCM_PCI_IO_BASE_PA, start 59 arch/mips/pci/pci-bcm63xx.c .start = BCM_CB_MEM_BASE_PA, start 66 arch/mips/pci/pci-bcm63xx.c .start = BCM_PCI_IO_HALF_PA + 1, start 80 arch/mips/pci/pci-bcm63xx.c .start = BCM_PCIE_MEM_BASE_PA, start 87 arch/mips/pci/pci-bcm63xx.c .start = 0, start 21 arch/mips/pci/pci-emma2rh.c .start = EMMA2RH_PCI_IO_BASE, start 28 arch/mips/pci/pci-emma2rh.c .start = EMMA2RH_PCI_MEM_BASE, start 28 arch/mips/pci/pci-generic.c resource_size_t start = res->start; start 31 arch/mips/pci/pci-generic.c if (res->flags & IORESOURCE_IO && start & 0x300) start 32 arch/mips/pci/pci-generic.c start = (start + 0x3ff) & ~0x3ff; start 34 arch/mips/pci/pci-generic.c start = (start + align - 1) & ~(align - 1); start 40 arch/mips/pci/pci-generic.c start, size, align); start 42 arch/mips/pci/pci-generic.c return start; start 89 arch/mips/pci/pci-ip32.c .start = MACEPCI_HI_MEMORY, start 95 arch/mips/pci/pci-ip32.c .start = 0x00000000UL, start 103 arch/mips/pci/pci-ip32.c .start = MACEPCI_LOW_MEMORY, start 109 arch/mips/pci/pci-ip32.c .start = 0x00000000, start 21 arch/mips/pci/pci-lasat.c .start = 0x18000000, start 28 arch/mips/pci/pci-lasat.c .start = 0x1a000000, start 51 arch/mips/pci/pci-legacy.c resource_size_t start = res->start; start 55 arch/mips/pci/pci-legacy.c if (start < PCIBIOS_MIN_IO + hose->io_resource->start) start 56 arch/mips/pci/pci-legacy.c start = PCIBIOS_MIN_IO + hose->io_resource->start; start 61 arch/mips/pci/pci-legacy.c if (start & 0x300) start 62 arch/mips/pci/pci-legacy.c start = (start + 0x3ff) & ~0x3ff; start 65 arch/mips/pci/pci-legacy.c if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start) start 66 arch/mips/pci/pci-legacy.c start = PCIBIOS_MIN_MEM + hose->mem_resource->start; start 69 arch/mips/pci/pci-legacy.c return start; start 266 arch/mips/pci/pci-legacy.c if (!r->start && r->end) { start 30 
arch/mips/pci/pci-malta.c .start = 0x00000000UL, start 81 arch/mips/pci/pci-malta.c resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; start 103 arch/mips/pci/pci-malta.c start = GT_READ(GT_PCI0M0LD_OFS); start 106 arch/mips/pci/pci-malta.c end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); start 112 arch/mips/pci/pci-malta.c if (end1 - start1 > end - start) { start 113 arch/mips/pci/pci-malta.c start = start1; start 117 arch/mips/pci/pci-malta.c mask = ~(start ^ end); start 119 arch/mips/pci/pci-malta.c BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && start 121 arch/mips/pci/pci-malta.c gt64120_mem_resource.start = start; start 123 arch/mips/pci/pci-malta.c gt64120_controller.mem_offset = (start & mask) - (map & mask); start 125 arch/mips/pci/pci-malta.c gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF; start 130 arch/mips/pci/pci-malta.c start = GT_READ(GT_PCI0IOLD_OFS); start 133 arch/mips/pci/pci-malta.c end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); start 134 arch/mips/pci/pci-malta.c mask = ~(start ^ end); start 136 arch/mips/pci/pci-malta.c BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && start 138 arch/mips/pci/pci-malta.c gt64120_io_resource.start = map & mask; start 142 arch/mips/pci/pci-malta.c gt64120_io_resource.start <<= GT_PCI_DCRM_SHF; start 160 arch/mips/pci/pci-malta.c start = BONITO_PCILO0_BASE; start 164 arch/mips/pci/pci-malta.c start = BONITO_PCILO1_BASE; start 169 arch/mips/pci/pci-malta.c start = BONITO_PCILO0_BASE; start 172 arch/mips/pci/pci-malta.c bonito64_mem_resource.start = start; start 173 arch/mips/pci/pci-malta.c bonito64_mem_resource.end = start + start 175 arch/mips/pci/pci-malta.c bonito64_controller.mem_offset = start - start 186 arch/mips/pci/pci-malta.c MSC_READ(MSC01_PCI_SC2PMBASL, start); start 189 arch/mips/pci/pci-malta.c msc_mem_resource.start = start & mask; start 190 arch/mips/pci/pci-malta.c msc_mem_resource.end = (start & mask) | ~mask; start 191 arch/mips/pci/pci-malta.c msc_controller.mem_offset = (start & mask) - (map & mask); start 193 arch/mips/pci/pci-malta.c write_gcr_reg0_base(start); start 197 arch/mips/pci/pci-malta.c MSC_READ(MSC01_PCI_SC2PIOBASL, start); start 200 arch/mips/pci/pci-malta.c msc_io_resource.start = map & mask; start 205 arch/mips/pci/pci-malta.c write_gcr_reg1_base(start); start 210 arch/mips/pci/pci-malta.c start = start & mask; start 211 arch/mips/pci/pci-malta.c end = start | ~mask; start 212 arch/mips/pci/pci-malta.c if ((start >= msc_mem_resource.start && start 213 arch/mips/pci/pci-malta.c start <= msc_mem_resource.end) || start 214 arch/mips/pci/pci-malta.c (end >= msc_mem_resource.start && start 217 arch/mips/pci/pci-malta.c start = max(start, msc_mem_resource.start); start 219 arch/mips/pci/pci-malta.c if (start - msc_mem_resource.start >= start 221 arch/mips/pci/pci-malta.c msc_mem_resource.end = start - 1; start 223 arch/mips/pci/pci-malta.c msc_mem_resource.start = end + 1; start 233 arch/mips/pci/pci-malta.c if (controller->io_resource->start < 0x00001000UL) start 234 arch/mips/pci/pci-malta.c controller->io_resource->start = 0x00001000UL; start 302 arch/mips/pci/pci-mt7620.c iomem_resource.start = 0; start 304 arch/mips/pci/pci-mt7620.c ioport_resource.start = 0; start 328 arch/mips/pci/pci-octeon.c .start = 0, start 339 arch/mips/pci/pci-octeon.c .start = 0x4000, start 589 arch/mips/pci/pci-octeon.c ioport_resource.start = 0; start 651 arch/mips/pci/pci-octeon.c octeon_pci_mem_resource.start = start 655 arch/mips/pci/pci-octeon.c 
octeon_pci_mem_resource.start + (1ul << 30); start 687 arch/mips/pci/pci-octeon.c octeon_pci_mem_resource.start = start 691 arch/mips/pci/pci-octeon.c octeon_pci_mem_resource.start + (1ul << 30); start 53 arch/mips/pci/pci-rc32434.c .start = 0x50000000, start 62 arch/mips/pci/pci-rc32434.c .start = 0x60000000, start 72 arch/mips/pci/pci-rc32434.c .start = 0x18800000, start 211 arch/mips/pci/pci-rc32434.c ioport_resource.start = rc32434_res_pci_io1.start; start 216 arch/mips/pci/pci-rc32434.c io_map_base = ioremap(rc32434_res_pci_io1.start, start 223 arch/mips/pci/pci-rc32434.c (unsigned long)io_map_base - rc32434_res_pci_io1.start; start 134 arch/mips/pci/pci-rt2880.c .start = RT2880_PCI_MEM_BASE, start 141 arch/mips/pci/pci-rt2880.c .start = RT2880_PCI_IO_BASE, start 227 arch/mips/pci/pci-rt2880.c ioport_resource.start = RT2880_PCI_IO_BASE; start 502 arch/mips/pci/pci-rt3883.c rt3883_pci_w32(rpc, rpc->mem_res.start, RT3883_PCI_REG_MEMBASE); start 503 arch/mips/pci/pci-rt3883.c rt3883_pci_w32(rpc, rpc->io_res.start, RT3883_PCI_REG_IOBASE); start 505 arch/mips/pci/pci-rt3883.c ioport_resource.start = rpc->io_res.start; start 178 arch/mips/pci/pci-sb1250.c .start = 0x40000000UL, start 185 arch/mips/pci/pci-sb1250.c .start = 0x00000000UL, start 103 arch/mips/pci/pci-virtio-guest.c .start = 0x10000000, start 110 arch/mips/pci/pci-virtio-guest.c .start = 0, start 60 arch/mips/pci/pci-vr41xx.c .start = PCI_MEM_RESOURCE_START, start 67 arch/mips/pci/pci-vr41xx.c .start = PCI_IO_RESOURCE_START, start 286 arch/mips/pci/pci-vr41xx.c ioport_resource.start = IO_PORT_RESOURCE_START; start 158 arch/mips/pci/pci-xlp.c .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ start 165 arch/mips/pci/pci-xlp.c .start = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */ start 300 arch/mips/pci/pci-xlp.c ioport_resource.start = 0; start 140 arch/mips/pci/pci-xlr.c .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ start 147 arch/mips/pci/pci-xlr.c .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */ start 339 arch/mips/pci/pci-xlr.c ioport_resource.start = 0; start 462 arch/mips/pci/pci-xtalk-bridge.c bc->busn.start = 0; start 48 arch/mips/pci/pci.c const struct resource *rsrc, resource_size_t *start, start 53 arch/mips/pci/pci.c *start = fixup_bigphys_addr(rsrc->start, size); start 54 arch/mips/pci/pci.c *end = rsrc->start + size - 1; start 1888 arch/mips/pci/pcie-octeon.c ioport_resource.start = 0; start 1900 arch/mips/pci/pcie-octeon.c octeon_dummy_controller.mem_resource->start = (1ull<<48); start 1945 arch/mips/pci/pcie-octeon.c octeon_pcie0_controller.mem_resource->start = start 1955 arch/mips/pci/pcie-octeon.c octeon_pcie0_controller.io_resource->start = 4 << 10; start 2027 arch/mips/pci/pcie-octeon.c octeon_pcie1_controller.mem_resource->start = start 2037 arch/mips/pci/pcie-octeon.c octeon_pcie1_controller.io_resource->start = start 2041 arch/mips/pci/pcie-octeon.c octeon_pcie1_controller.io_resource->start + start 129 arch/mips/pic32/pic32mzda/init.c lookup->phys_addr = res.start; start 45 arch/mips/pmcs-msp71xx/msp_eth.c .start = MSP_MAC0_BASE, start 50 arch/mips/pmcs-msp71xx/msp_eth.c .start = MSP_INT_MAC0, start 58 arch/mips/pmcs-msp71xx/msp_eth.c .start = MSP_MAC1_BASE, start 63 arch/mips/pmcs-msp71xx/msp_eth.c .start = MSP_INT_MAC1, start 37 arch/mips/pmcs-msp71xx/msp_setup.c void *start, *end, *iptr; start 52 arch/mips/pmcs-msp71xx/msp_setup.c : "=r" (start), "=r" (end) start 56 arch/mips/pmcs-msp71xx/msp_setup.c for (iptr = (void *)((unsigned int)start & ~(L1_CACHE_BYTES - 1)); start 43 
arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_USB0_HS_START, start 48 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_INT_USB, start 53 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_USB0_MAB_START, start 58 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_USB0_ID_START, start 83 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_USB0_HS_START, start 88 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_INT_USB, start 93 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_USB0_MAB_START, start 98 arch/mips/pmcs-msp71xx/msp_usb.c .start = MSP_USB0_ID_START, start 30 arch/mips/pnx833x/common/platform.c .start = PNX833X_UART0_PORTS_START, start 35 arch/mips/pnx833x/common/platform.c .start = PNX833X_PIC_UART0_INT, start 40 arch/mips/pnx833x/common/platform.c .start = PNX833X_UART1_PORTS_START, start 45 arch/mips/pnx833x/common/platform.c .start = PNX833X_PIC_UART1_INT, start 96 arch/mips/pnx833x/common/platform.c .start = PNX833X_USB_PORTS_START, start 101 arch/mips/pnx833x/common/platform.c .start = PNX833X_PIC_USB_INT, start 122 arch/mips/pnx833x/common/platform.c .start = PNX8335_IP3902_PORTS_START, start 128 arch/mips/pnx833x/common/platform.c .start = PNX8335_PIC_ETHERNET_INT, start 148 arch/mips/pnx833x/common/platform.c .start = PNX8335_SATA_PORTS_START, start 153 arch/mips/pnx833x/common/platform.c .start = PNX8335_PIC_SATA_INT, start 195 arch/mips/pnx833x/common/platform.c .start = PNX8335_NAND_BASE, start 42 arch/mips/pnx833x/common/setup.c ioport_resource.start = 0; start 44 arch/mips/pnx833x/common/setup.c iomem_resource.start = 0; start 164 arch/mips/ralink/irq.c if (!request_mem_region(res.start, resource_size(&res), start 168 arch/mips/ralink/irq.c rt_intc_membase = ioremap_nocache(res.start, start 41 arch/mips/ralink/of.c if (!request_mem_region(res.start, start 46 arch/mips/ralink/of.c return ioremap_nocache(res.start, resource_size(&res)); start 62 arch/mips/rb532/devices.c .start = ETH0_BASE_ADDR, start 67 arch/mips/rb532/devices.c .start = ETH0_DMA_RX_IRQ, start 72 arch/mips/rb532/devices.c .start = ETH0_DMA_TX_IRQ, start 77 arch/mips/rb532/devices.c .start = ETH0_RX_OVR_IRQ, start 82 arch/mips/rb532/devices.c .start = ETH0_TX_UND_IRQ, start 87 arch/mips/rb532/devices.c .start = ETH0_RX_DMA_ADDR, start 92 arch/mips/rb532/devices.c .start = ETH0_TX_DMA_ADDR, start 116 arch/mips/rb532/devices.c .start = (8 + 4 * 32 + CF_GPIO_NUM), /* 149 */ start 206 arch/mips/rb532/devices.c .start = INTEG0_BASE_ADDR, start 279 arch/mips/rb532/devices.c cf_slot0_res[0].start = start 281 arch/mips/rb532/devices.c cf_slot0_res[0].end = cf_slot0_res[0].start + 0x1000; start 285 arch/mips/rb532/devices.c nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE); start 286 arch/mips/rb532/devices.c nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000; start 48 arch/mips/rb532/gpio.c .start = REGBASE + GPIOBASE, start 195 arch/mips/rb532/gpio.c rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); start 31 arch/mips/rb532/prom.c .start = DDR0_PHYS_ADDR, start 113 arch/mips/rb532/prom.c ddr = ioremap_nocache(ddr_reg[0].start, start 114 arch/mips/rb532/prom.c ddr_reg[0].end - ddr_reg[0].start); start 23 arch/mips/rb532/setup.c .start = PCI0_BASE_ADDR, start 52 arch/mips/rb532/setup.c pci_reg = ioremap_nocache(pci0_res[0].start, start 53 arch/mips/rb532/setup.c pci0_res[0].end - pci0_res[0].start); start 370 arch/mips/sgi-ip22/ip22-gio.c gio_dev->resource.start = addr; start 391 arch/mips/sgi-ip22/ip22-gio.c .start = GIO_SLOT_GFX_BASE, start 18 arch/mips/sgi-ip22/ip22-platform.c .start = 
SGI_WD93_0_IRQ, start 46 arch/mips/sgi-ip22/ip22-platform.c .start = SGI_WD93_1_IRQ, start 100 arch/mips/sgi-ip22/ip22-platform.c .start = SGI_ENET_IRQ, start 125 arch/mips/sgi-ip22/ip22-platform.c .start = SGI_GIO_0_IRQ, start 215 arch/mips/sgi-ip22/ip22-platform.c res.start = HPC3_CHIP0_BASE + offsetof(struct hpc3_regs, rtcregs); start 216 arch/mips/sgi-ip22/ip22-platform.c res.end = res.start + sizeof(hpc3c0->rtcregs) - 1; start 165 arch/mips/sgi-ip27/ip27-init.c ioport_resource.start = 0; start 51 arch/mips/sgi-ip27/ip27-klconfig.c lboard_t *find_lboard(lboard_t *start, unsigned char brd_type) start 54 arch/mips/sgi-ip27/ip27-klconfig.c while (start) { start 55 arch/mips/sgi-ip27/ip27-klconfig.c if (start->brd_type == brd_type) start 56 arch/mips/sgi-ip27/ip27-klconfig.c return start; start 57 arch/mips/sgi-ip27/ip27-klconfig.c start = KLCF_NEXT(start); start 63 arch/mips/sgi-ip27/ip27-klconfig.c lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type) start 66 arch/mips/sgi-ip27/ip27-klconfig.c while (start) { start 67 arch/mips/sgi-ip27/ip27-klconfig.c if (KLCLASS(start->brd_type) == KLCLASS(brd_type)) start 68 arch/mips/sgi-ip27/ip27-klconfig.c return start; start 69 arch/mips/sgi-ip27/ip27-klconfig.c start = KLCF_NEXT(start); start 197 arch/mips/sgi-ip27/ip27-timer.c res.start = XPHYSADDR(KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base + start 199 arch/mips/sgi-ip27/ip27-timer.c res.end = res.start + 32767; start 49 arch/mips/sgi-ip27/ip27-xtalk.c bd->mem.start = offset + (widget << SWIN_SIZE_BITS); start 50 arch/mips/sgi-ip27/ip27-xtalk.c bd->mem.end = bd->mem.start + SWIN_SIZE - 1; start 55 arch/mips/sgi-ip27/ip27-xtalk.c bd->io.start = offset + (widget << SWIN_SIZE_BITS); start 56 arch/mips/sgi-ip27/ip27-xtalk.c bd->io.end = bd->io.start + SWIN_SIZE - 1; start 100 arch/mips/sgi-ip32/ip32-platform.c .start = MACEISA_RTC_IRQ, start 104 arch/mips/sgi-ip32/ip32-platform.c .start = MACE_RTC_RES_START, start 31 arch/mips/sibyte/swarm/platform.c .start = K_INT_GB_IDE, start 76 arch/mips/sibyte/swarm/platform.c r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT); start 78 arch/mips/sibyte/swarm/platform.c r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT); start 92 arch/mips/sibyte/swarm/platform.c .start = A_MAC_CHANNEL_BASE(num), \ start 45 arch/mips/sni/a20r.c .start = 0x1c081ffc, start 59 arch/mips/sni/a20r.c .start = 0x18000000, start 64 arch/mips/sni/a20r.c .start = 0x18010000, start 69 arch/mips/sni/a20r.c .start = 0x1ff00000, start 74 arch/mips/sni/a20r.c .start = 22, start 91 arch/mips/sni/a20r.c .start = 0x19000000, start 96 arch/mips/sni/a20r.c .start = 19, start 110 arch/mips/sni/a20r.c .start = 0x1c070000, start 115 arch/mips/sni/a20r.c .start = 20, start 95 arch/mips/sni/pcimt.c .start = 0x70, start 100 arch/mips/sni/pcimt.c .start = 8, start 114 arch/mips/sni/pcimt.c .start = 0x00000000UL, start 122 arch/mips/sni/pcimt.c .start = 0x00, start 127 arch/mips/sni/pcimt.c .start = 0x40, start 132 arch/mips/sni/pcimt.c .start = 0x60, start 137 arch/mips/sni/pcimt.c .start = 0x80, start 142 arch/mips/sni/pcimt.c .start = 0xc0, start 147 arch/mips/sni/pcimt.c .start = 0xcfc, start 160 arch/mips/sni/pcimt.c .start = 0x1a000000, start 168 arch/mips/sni/pcimt.c .start = 0x18000000UL, start 63 arch/mips/sni/pcit.c .start = 0x70, start 68 arch/mips/sni/pcit.c .start = 8, start 86 arch/mips/sni/pcit.c .start = 0x00000000UL, start 94 arch/mips/sni/pcit.c .start = 0x00, start 99 arch/mips/sni/pcit.c .start = 0x40, start 104 arch/mips/sni/pcit.c .start = 0x60, 
start 109 arch/mips/sni/pcit.c .start = 0x80, start 114 arch/mips/sni/pcit.c .start = 0xc0, start 119 arch/mips/sni/pcit.c .start = 0xcf8, start 124 arch/mips/sni/pcit.c .start = 0xcfc, start 145 arch/mips/sni/pcit.c .start = 0x18000000UL, start 52 arch/mips/sni/rm200.c .start = 0x1cd41ffc, start 66 arch/mips/sni/rm200.c .start = 0x18000000, start 71 arch/mips/sni/rm200.c .start = 0x1b000000, start 76 arch/mips/sni/rm200.c .start = 0x1ff00000, start 81 arch/mips/sni/rm200.c .start = 27, start 98 arch/mips/sni/rm200.c .start = 0x19000000, start 103 arch/mips/sni/rm200.c .start = 26, start 367 arch/mips/sni/rm200.c .start = 0x16000020, start 374 arch/mips/sni/rm200.c .start = 0x160000a0, start 140 arch/mips/txx9/generic/pci.c pcic->mem_resource[0].start = mem_base; start 171 arch/mips/txx9/generic/pci.c pcic->mem_resource[1].start = io_base; start 189 arch/mips/txx9/generic/pci.c io_base = pcic->mem_resource[1].start; start 196 arch/mips/txx9/generic/pci.c set_io_port_base(IO_BASE + pcic->mem_resource[1].start); start 197 arch/mips/txx9/generic/pci.c pcic->io_resource->start = 0; start 199 arch/mips/txx9/generic/pci.c pcic->io_map_base = IO_BASE + pcic->mem_resource[1].start; start 202 arch/mips/txx9/generic/pci.c pcic->io_resource->start = start 207 arch/mips/txx9/generic/pci.c pcic->io_resource->end = pcic->io_resource->start + io_size - 1; start 70 arch/mips/txx9/generic/setup.c txx9_reg_res.start = base & 0xfffffffffULL; start 392 arch/mips/txx9/generic/setup.c .start = base, start 418 arch/mips/txx9/generic/setup.c .start = base, start 422 arch/mips/txx9/generic/setup.c .start = irq, start 497 arch/mips/txx9/generic/setup.c ioport_resource.start = 0; start 499 arch/mips/txx9/generic/setup.c iomem_resource.start = 0; start 618 arch/mips/txx9/generic/setup.c .start = addr, start 656 arch/mips/txx9/generic/setup.c .start = baseaddr, start 794 arch/mips/txx9/generic/setup.c .start = baseaddr, start 799 arch/mips/txx9/generic/setup.c .start = irq, start 826 arch/mips/txx9/generic/setup.c chan_res[0].start = irq + i; start 851 arch/mips/txx9/generic/setup.c .start = baseaddr, start 855 arch/mips/txx9/generic/setup.c .start = irq, start 859 arch/mips/txx9/generic/setup.c .start = dma_base + dma_chan_out, start 863 arch/mips/txx9/generic/setup.c .start = dma_base + dma_chan_in, start 939 arch/mips/txx9/generic/setup.c dev->base = ioremap(r->start, size); start 43 arch/mips/txx9/generic/setup_tx3927.c txx9_ce_res[i].start = (unsigned long)TX3927_ROMC_BA(i); start 45 arch/mips/txx9/generic/setup_tx3927.c txx9_ce_res[i].start + TX3927_ROMC_SIZE(i) - 1; start 130 arch/mips/txx9/generic/setup_tx3927.c unsigned long start = txx9_ce_res[ch].start; start 131 arch/mips/txx9/generic/setup_tx3927.c unsigned long size = txx9_ce_res[ch].end - start + 1; start 135 arch/mips/txx9/generic/setup_tx3927.c txx9_physmap_flash_init(ch, start, size, &pdata); start 103 arch/mips/txx9/generic/setup_tx4927.c txx9_ce_res[i].start = (unsigned long)TX4927_EBUSC_BA(i); start 105 arch/mips/txx9/generic/setup_tx4927.c txx9_ce_res[i].start + TX4927_EBUSC_SIZE(i) - 1; start 203 arch/mips/txx9/generic/setup_tx4927.c tx4927_sdram_resource[i].start = base; start 246 arch/mips/txx9/generic/setup_tx4927.c unsigned long start = txx9_ce_res[ch].start; start 247 arch/mips/txx9/generic/setup_tx4927.c unsigned long size = txx9_ce_res[ch].end - start + 1; start 251 arch/mips/txx9/generic/setup_tx4927.c txx9_physmap_flash_init(ch, start, size, &pdata); start 108 arch/mips/txx9/generic/setup_tx4938.c txx9_ce_res[i].start = (unsigned 
long)TX4938_EBUSC_BA(i); start 110 arch/mips/txx9/generic/setup_tx4938.c txx9_ce_res[i].start + TX4938_EBUSC_SIZE(i) - 1; start 216 arch/mips/txx9/generic/setup_tx4938.c tx4938_sdram_resource[i].start = base; start 227 arch/mips/txx9/generic/setup_tx4938.c tx4938_sram_resource.start = start 231 arch/mips/txx9/generic/setup_tx4938.c tx4938_sram_resource.start + TX4938_SRAM_SIZE - 1; start 325 arch/mips/txx9/generic/setup_tx4938.c unsigned long start = txx9_ce_res[ch].start; start 326 arch/mips/txx9/generic/setup_tx4938.c unsigned long size = txx9_ce_res[ch].end - start + 1; start 330 arch/mips/txx9/generic/setup_tx4938.c txx9_physmap_flash_init(ch, start, size, &pdata); start 341 arch/mips/txx9/generic/setup_tx4938.c .start = irq, start 369 arch/mips/txx9/generic/setup_tx4938.c res[0].start = ((ebccr >> 48) << 20) + 0x10000; start 370 arch/mips/txx9/generic/setup_tx4938.c res[0].end = res[0].start + 0x20000 - 1; start 427 arch/mips/txx9/generic/setup_tx4938.c if (tx4938_sram_resource.start) start 100 arch/mips/txx9/generic/setup_tx4939.c unsigned long start, size; start 107 arch/mips/txx9/generic/setup_tx4939.c start = (unsigned long)(win >> 48); start 108 arch/mips/txx9/generic/setup_tx4939.c size = (((unsigned long)(win >> 32) & 0xffff) + 1) - start; start 109 arch/mips/txx9/generic/setup_tx4939.c add_memory_region(start << 20, size << 20, BOOT_MEM_RAM); start 128 arch/mips/txx9/generic/setup_tx4939.c txx9_ce_res[i].start = (unsigned long)TX4939_EBUSC_BA(i); start 130 arch/mips/txx9/generic/setup_tx4939.c txx9_ce_res[i].start + TX4939_EBUSC_SIZE(i) - 1; start 235 arch/mips/txx9/generic/setup_tx4939.c tx4939_sdram_resource[i].start = start 249 arch/mips/txx9/generic/setup_tx4939.c tx4939_sram_resource.start = start 253 arch/mips/txx9/generic/setup_tx4939.c tx4939_sram_resource.start + TX4939_SRAM_SIZE - 1; start 379 arch/mips/txx9/generic/setup_tx4939.c unsigned long start = txx9_ce_res[ch].start; start 380 arch/mips/txx9/generic/setup_tx4939.c unsigned long size = txx9_ce_res[ch].end - start + 1; start 384 arch/mips/txx9/generic/setup_tx4939.c txx9_physmap_flash_init(ch, start, size, &pdata); start 392 arch/mips/txx9/generic/setup_tx4939.c .start = TX4939_ATA_REG_PHYS(0), start 396 arch/mips/txx9/generic/setup_tx4939.c .start = TXX9_IRQ_BASE + TX4939_IR_ATA(0), start 402 arch/mips/txx9/generic/setup_tx4939.c .start = TX4939_ATA_REG_PHYS(1), start 406 arch/mips/txx9/generic/setup_tx4939.c .start = TXX9_IRQ_BASE + TX4939_IR_ATA(1), start 436 arch/mips/txx9/generic/setup_tx4939.c .start = TX4939_RTC_REG & 0xfffffffffULL, start 440 arch/mips/txx9/generic/setup_tx4939.c .start = TXX9_IRQ_BASE + TX4939_IR_RTC, start 496 arch/mips/txx9/generic/setup_tx4939.c if (tx4939_sram_resource.start) start 503 arch/mips/txx9/generic/setup_tx4939.c .start = TX4939_RNG_REG & 0xfffffffffULL, start 176 arch/mips/txx9/jmr3927/setup.c .start = JMR3927_IOC_NVRAMB_ADDR - IO_BASE, start 290 arch/mips/txx9/rbtx4927/setup.c .start = RBTX4927_BRAMRTC_BASE - IO_BASE, start 301 arch/mips/txx9/rbtx4927/setup.c .start = RBTX4927_RTL_8019_BASE, start 305 arch/mips/txx9/rbtx4927/setup.c .start = RBTX4927_RTL_8019_IRQ, start 213 arch/mips/txx9/rbtx4938/setup.c rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR); start 231 arch/mips/txx9/rbtx4938/setup.c .start = RBTX4938_RTL_8019_BASE, start 235 arch/mips/txx9/rbtx4938/setup.c .start = RBTX4938_RTL_8019_IRQ, start 423 arch/mips/txx9/rbtx4939/setup.c r->start = 0x1f000000 - i * 0x1000000; start 424 arch/mips/txx9/rbtx4939/setup.c r->end = r->start + 0x1000000 - 
1; start 451 arch/mips/txx9/rbtx4939/setup.c .start = smc_addr, start 455 arch/mips/txx9/rbtx4939/setup.c .start = RBTX4939_IRQ_ETHER, start 21 arch/mips/vr41xx/casio-e55/setup.c ioport_resource.start = E55_ISA_IO_START; start 204 arch/mips/vr41xx/common/cmu.c unsigned long start, size; start 209 arch/mips/vr41xx/common/cmu.c start = CMU_TYPE1_BASE; start 214 arch/mips/vr41xx/common/cmu.c start = CMU_TYPE2_BASE; start 218 arch/mips/vr41xx/common/cmu.c start = CMU_TYPE3_BASE; start 226 arch/mips/vr41xx/common/cmu.c if (request_mem_region(start, size, "CMU") == NULL) start 229 arch/mips/vr41xx/common/cmu.c cmu_base = ioremap(start, size); start 231 arch/mips/vr41xx/common/cmu.c release_mem_region(start, size); start 19 arch/mips/vr41xx/common/giu.c .start = 0x0b000100, start 24 arch/mips/vr41xx/common/giu.c .start = 0x0b0002e0, start 29 arch/mips/vr41xx/common/giu.c .start = GIUINT_IRQ, start 37 arch/mips/vr41xx/common/giu.c .start = 0x0f000140, start 42 arch/mips/vr41xx/common/giu.c .start = GIUINT_IRQ, start 50 arch/mips/vr41xx/common/giu.c .start = 0x0f000140, start 55 arch/mips/vr41xx/common/giu.c .start = GIUINT_IRQ, start 22 arch/mips/vr41xx/common/init.c iomem_resource.start = IO_MEM_RESOURCE_START; start 87 arch/mips/vr41xx/common/pmu.c unsigned long start, size; start 92 arch/mips/vr41xx/common/pmu.c start = PMU_TYPE1_BASE; start 98 arch/mips/vr41xx/common/pmu.c start = PMU_TYPE2_BASE; start 106 arch/mips/vr41xx/common/pmu.c if (request_mem_region(start, size, "PMU") == NULL) start 109 arch/mips/vr41xx/common/pmu.c pmu_base = ioremap(start, size); start 111 arch/mips/vr41xx/common/pmu.c release_mem_region(start, size); start 18 arch/mips/vr41xx/common/rtc.c .start = 0x0b0000c0, start 23 arch/mips/vr41xx/common/rtc.c .start = 0x0b0001c0, start 28 arch/mips/vr41xx/common/rtc.c .start = ELAPSEDTIME_IRQ, start 33 arch/mips/vr41xx/common/rtc.c .start = RTCLONG1_IRQ, start 41 arch/mips/vr41xx/common/rtc.c .start = 0x0f000100, start 46 arch/mips/vr41xx/common/rtc.c .start = 0x0f000120, start 51 arch/mips/vr41xx/common/rtc.c .start = ELAPSEDTIME_IRQ, start 56 arch/mips/vr41xx/common/rtc.c .start = RTCLONG1_IRQ, start 24 arch/mips/vr41xx/common/siu.c .start = 0x0c000000, start 29 arch/mips/vr41xx/common/siu.c .start = SIU_IRQ, start 42 arch/mips/vr41xx/common/siu.c .start = 0x0f000800, start 47 arch/mips/vr41xx/common/siu.c .start = 0x0f000820, start 52 arch/mips/vr41xx/common/siu.c .start = SIU_IRQ, start 57 arch/mips/vr41xx/common/siu.c .start = DSIU_IRQ, start 138 arch/mips/vr41xx/common/siu.c port.mapbase = res[i].start; start 139 arch/mips/vr41xx/common/siu.c port.membase = (unsigned char __iomem *)KSEG1ADDR(res[i].start); start 21 arch/mips/vr41xx/ibm-workpad/setup.c ioport_resource.start = WORKPAD_ISA_IO_START; start 11 arch/nds32/include/asm/cacheflush.h void flush_icache_range(unsigned long start, unsigned long end); start 21 arch/nds32/include/asm/cacheflush.h unsigned long start, unsigned long end); start 25 arch/nds32/include/asm/cacheflush.h void flush_cache_vmap(unsigned long start, unsigned long end); start 26 arch/nds32/include/asm/cacheflush.h void flush_cache_vunmap(unsigned long start, unsigned long end); start 71 arch/nds32/include/asm/nds32.h * We defined at the start of the physical memory */ start 76 arch/nds32/include/asm/pmu.h void (*start)(struct nds32_pmu *nds32_pmu); start 23 arch/nds32/include/asm/proc-fns.h extern void cpu_dcache_inval_range(unsigned long start, unsigned long end); start 24 arch/nds32/include/asm/proc-fns.h extern void 
cpu_dcache_wb_range(unsigned long start, unsigned long end); start 25 arch/nds32/include/asm/proc-fns.h extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end); start 29 arch/nds32/include/asm/proc-fns.h extern void cpu_icache_inval_range(unsigned long start, unsigned long end); start 32 arch/nds32/include/asm/proc-fns.h extern void cpu_cache_wbinval_range(unsigned long start, start 35 arch/nds32/include/asm/proc-fns.h unsigned long start, start 39 arch/nds32/include/asm/proc-fns.h extern void cpu_dma_wb_range(unsigned long start, unsigned long end); start 40 arch/nds32/include/asm/proc-fns.h extern void cpu_dma_inval_range(unsigned long start, unsigned long end); start 41 arch/nds32/include/asm/proc-fns.h extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end); start 23 arch/nds32/include/asm/tlbflush.h static inline void local_flush_tlb_kernel_range(unsigned long start, start 26 arch/nds32/include/asm/tlbflush.h while (start < end) { start 27 arch/nds32/include/asm/tlbflush.h __nds32__tlbop_inv(start); start 29 arch/nds32/include/asm/tlbflush.h start += PAGE_SIZE; start 34 arch/nds32/include/asm/tlbflush.h unsigned long start, unsigned long end); start 33 arch/nds32/kernel/atl2c.c atl2c_base = ioremap(res.start, resource_size(&res)); start 14 arch/nds32/kernel/dma.c void (*fn)(unsigned long start, unsigned long end)) start 19 arch/nds32/kernel/dma.c unsigned long start; start 36 arch/nds32/kernel/dma.c start = (unsigned long)(addr + offset); start 37 arch/nds32/kernel/dma.c fn(start, start + len); start 40 arch/nds32/kernel/dma.c start = (unsigned long)phys_to_virt(paddr); start 41 arch/nds32/kernel/dma.c fn(start, start + size); start 649 arch/nds32/kernel/perf_event_cpu.c cpu_pmu->start = nds32_pmu_start; start 702 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->start(nds32_pmu); start 1051 arch/nds32/kernel/perf_event_cpu.c .start = nds32_start, start 378 arch/nds32/kernel/setup.c .start = c_start, start 29 arch/nds32/kernel/sys_nds32.c SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache) start 34 arch/nds32/kernel/sys_nds32.c vma = find_vma(current->mm, start); start 49 arch/nds32/kernel/sys_nds32.c cpu_cache_wbinval_range_check(vma, start, end, flushi, wbd); start 96 arch/nds32/kernel/vdso.c unsigned long start = current->mm->mmap_base, end, offset, addr; start 97 arch/nds32/kernel/vdso.c start = PAGE_ALIGN(start); start 100 arch/nds32/kernel/vdso.c end = (start + vdso_mapping_len + PMD_SIZE - 1) & PMD_MASK; start 105 arch/nds32/kernel/vdso.c if (end > start) { start 106 arch/nds32/kernel/vdso.c offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); start 107 arch/nds32/kernel/vdso.c addr = start + (offset << PAGE_SHIFT); start 109 arch/nds32/kernel/vdso.c addr = start; start 44 arch/nds32/kernel/vdso/gettimeofday.c static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start) start 47 arch/nds32/kernel/vdso/gettimeofday.c return vdata->seq_count != start; start 16 arch/nds32/mm/cacheflush.c void flush_icache_range(unsigned long start, unsigned long end) start 20 arch/nds32/mm/cacheflush.c start = start & ~(line_size - 1); start 23 arch/nds32/mm/cacheflush.c cpu_cache_wbinval_range(start, end, 1); start 134 arch/nds32/mm/cacheflush.c unsigned long start, unsigned long end) start 138 arch/nds32/mm/cacheflush.c if ((end - start) > 8 * PAGE_SIZE) { start 145 arch/nds32/mm/cacheflush.c while (start < end) { start 146 arch/nds32/mm/cacheflush.c if (va_present(vma->vm_mm, start)) start 147 
arch/nds32/mm/cacheflush.c cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC); start 148 arch/nds32/mm/cacheflush.c start += PAGE_SIZE; start 166 arch/nds32/mm/cacheflush.c void flush_cache_vmap(unsigned long start, unsigned long end) start 172 arch/nds32/mm/cacheflush.c void flush_cache_vunmap(unsigned long start, unsigned long end) start 269 arch/nds32/mm/cacheflush.c unsigned long line_size, start, end, vto, flags; start 277 arch/nds32/mm/cacheflush.c start = (unsigned long)dst & ~(line_size - 1); start 281 arch/nds32/mm/cacheflush.c cpu_cache_wbinval_range(start, end, 1); start 172 arch/nds32/mm/proc.c void cpu_icache_inval_page(unsigned long start) start 177 arch/nds32/mm/proc.c end = start + PAGE_SIZE; start 188 arch/nds32/mm/proc.c } while (end != start); start 192 arch/nds32/mm/proc.c void cpu_dcache_inval_page(unsigned long start) start 197 arch/nds32/mm/proc.c end = start + PAGE_SIZE; start 208 arch/nds32/mm/proc.c } while (end != start); start 211 arch/nds32/mm/proc.c void cpu_dcache_wb_page(unsigned long start) start 217 arch/nds32/mm/proc.c end = start + PAGE_SIZE; start 228 arch/nds32/mm/proc.c } while (end != start); start 233 arch/nds32/mm/proc.c void cpu_dcache_wbinval_page(unsigned long start) start 238 arch/nds32/mm/proc.c end = start + PAGE_SIZE; start 261 arch/nds32/mm/proc.c } while (end != start); start 275 arch/nds32/mm/proc.c void cpu_icache_inval_range(unsigned long start, unsigned long end) start 281 arch/nds32/mm/proc.c while (end > start) { start 282 arch/nds32/mm/proc.c __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start)); start 283 arch/nds32/mm/proc.c start += line_size; start 288 arch/nds32/mm/proc.c void cpu_dcache_inval_range(unsigned long start, unsigned long end) start 294 arch/nds32/mm/proc.c while (end > start) { start 295 arch/nds32/mm/proc.c __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start)); start 296 arch/nds32/mm/proc.c start += line_size; start 300 arch/nds32/mm/proc.c void cpu_dcache_wb_range(unsigned long start, unsigned long end) start 307 arch/nds32/mm/proc.c while (end > start) { start 308 arch/nds32/mm/proc.c __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start)); start 309 arch/nds32/mm/proc.c start += line_size; start 315 arch/nds32/mm/proc.c void cpu_dcache_wbinval_range(unsigned long start, unsigned long end) start 321 arch/nds32/mm/proc.c while (end > start) { start 323 arch/nds32/mm/proc.c __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start)); start 325 arch/nds32/mm/proc.c __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start)); start 326 arch/nds32/mm/proc.c start += line_size; start 331 arch/nds32/mm/proc.c void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi) start 336 arch/nds32/mm/proc.c align_start = start & ~(line_size - 1); start 342 arch/nds32/mm/proc.c align_start = start & ~(line_size - 1); start 349 arch/nds32/mm/proc.c unsigned long start, unsigned long end, start 357 arch/nds32/mm/proc.c start = start & ~(line_size - 1); start 360 arch/nds32/mm/proc.c if ((end - start) > (8 * PAGE_SIZE)) { start 368 arch/nds32/mm/proc.c t_start = (start + PAGE_SIZE) & PAGE_MASK; start 371 arch/nds32/mm/proc.c if ((start & PAGE_MASK) == t_end) { start 372 arch/nds32/mm/proc.c if (va_present(vma->vm_mm, start)) { start 374 arch/nds32/mm/proc.c cpu_dcache_wbinval_range(start, end); start 376 arch/nds32/mm/proc.c cpu_icache_inval_range(start, end); start 381 arch/nds32/mm/proc.c if (va_present(vma->vm_mm, start)) { start 383 arch/nds32/mm/proc.c cpu_dcache_wbinval_range(start, 
t_start); start 385 arch/nds32/mm/proc.c cpu_icache_inval_range(start, t_start); start 407 arch/nds32/mm/proc.c static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op) start 410 arch/nds32/mm/proc.c unsigned long p_start = __pa(start); start 434 arch/nds32/mm/proc.c #define cpu_l2cache_op(start,end,op) do { } while (0) start 439 arch/nds32/mm/proc.c void cpu_dma_wb_range(unsigned long start, unsigned long end) start 444 arch/nds32/mm/proc.c start = start & (~(line_size - 1)); start 446 arch/nds32/mm/proc.c if (unlikely(start == end)) start 450 arch/nds32/mm/proc.c cpu_dcache_wb_range(start, end); start 451 arch/nds32/mm/proc.c cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB); start 456 arch/nds32/mm/proc.c void cpu_dma_inval_range(unsigned long start, unsigned long end) start 459 arch/nds32/mm/proc.c unsigned long old_start = start; start 463 arch/nds32/mm/proc.c start = start & (~(line_size - 1)); start 465 arch/nds32/mm/proc.c if (unlikely(start == end)) start 468 arch/nds32/mm/proc.c if (start != old_start) { start 469 arch/nds32/mm/proc.c cpu_dcache_wbinval_range(start, start + line_size); start 470 arch/nds32/mm/proc.c cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL); start 476 arch/nds32/mm/proc.c cpu_dcache_inval_range(start, end); start 477 arch/nds32/mm/proc.c cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL); start 483 arch/nds32/mm/proc.c void cpu_dma_wbinval_range(unsigned long start, unsigned long end) start 488 arch/nds32/mm/proc.c start = start & (~(line_size - 1)); start 490 arch/nds32/mm/proc.c if (unlikely(start == end)) start 494 arch/nds32/mm/proc.c cpu_dcache_wbinval_range(start, end); start 495 arch/nds32/mm/proc.c cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL); start 15 arch/nds32/mm/tlb.c unsigned long start, unsigned long end) start 19 arch/nds32/mm/tlb.c if ((end - start) > 0x400000) { start 29 arch/nds32/mm/tlb.c while (start < end) { start 30 arch/nds32/mm/tlb.c __nds32__tlbop_inv(start); start 32 arch/nds32/mm/tlb.c start += PAGE_SIZE; start 26 arch/nios2/include/asm/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, start 33 arch/nios2/include/asm/cacheflush.h extern void flush_icache_range(unsigned long start, unsigned long end); start 36 arch/nios2/include/asm/cacheflush.h #define flush_cache_vmap(start, end) flush_dcache_range(start, end) start 37 arch/nios2/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) flush_dcache_range(start, end) start 46 arch/nios2/include/asm/cacheflush.h extern void flush_dcache_range(unsigned long start, unsigned long end); start 47 arch/nios2/include/asm/cacheflush.h extern void invalidate_dcache_range(unsigned long start, unsigned long end); start 26 arch/nios2/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 28 arch/nios2/include/asm/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 189 arch/nios2/kernel/cpuinfo.c .start = cpuinfo_start, start 48 arch/nios2/kernel/setup.c unsigned int start = (unsigned int) exception_handler_hook; start 51 arch/nios2/kernel/setup.c if (start == addr) { start 73 arch/nios2/kernel/setup.c : "r" (start), "r" (addr), "r" (tmp) start 81 arch/nios2/kernel/setup.c unsigned int start = (unsigned int) fast_handler; start 96 arch/nios2/kernel/setup.c : "r" (start), "r" (addr), "r" (end), "r" (tmp) start 13 arch/nios2/lib/delay.c cycles_t start = get_cycles(); start 15 arch/nios2/lib/delay.c while 
((get_cycles() - start) < cycles) start 18 arch/nios2/mm/cacheflush.c static void __flush_dcache(unsigned long start, unsigned long end) start 22 arch/nios2/mm/cacheflush.c start &= ~(cpuinfo.dcache_line_size - 1); start 26 arch/nios2/mm/cacheflush.c if (end > start + cpuinfo.dcache_size) start 27 arch/nios2/mm/cacheflush.c end = start + cpuinfo.dcache_size; start 29 arch/nios2/mm/cacheflush.c for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { start 37 arch/nios2/mm/cacheflush.c static void __invalidate_dcache(unsigned long start, unsigned long end) start 41 arch/nios2/mm/cacheflush.c start &= ~(cpuinfo.dcache_line_size - 1); start 45 arch/nios2/mm/cacheflush.c for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { start 53 arch/nios2/mm/cacheflush.c static void __flush_icache(unsigned long start, unsigned long end) start 57 arch/nios2/mm/cacheflush.c start &= ~(cpuinfo.icache_line_size - 1); start 61 arch/nios2/mm/cacheflush.c if (end > start + cpuinfo.icache_size) start 62 arch/nios2/mm/cacheflush.c end = start + cpuinfo.icache_size; start 64 arch/nios2/mm/cacheflush.c for (addr = start; addr < end; addr += cpuinfo.icache_line_size) { start 113 arch/nios2/mm/cacheflush.c void flush_icache_range(unsigned long start, unsigned long end) start 115 arch/nios2/mm/cacheflush.c __flush_dcache(start, end); start 116 arch/nios2/mm/cacheflush.c __flush_icache(start, end); start 119 arch/nios2/mm/cacheflush.c void flush_dcache_range(unsigned long start, unsigned long end) start 121 arch/nios2/mm/cacheflush.c __flush_dcache(start, end); start 122 arch/nios2/mm/cacheflush.c __flush_icache(start, end); start 126 arch/nios2/mm/cacheflush.c void invalidate_dcache_range(unsigned long start, unsigned long end) start 128 arch/nios2/mm/cacheflush.c __invalidate_dcache(start, end); start 132 arch/nios2/mm/cacheflush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start, start 135 arch/nios2/mm/cacheflush.c __flush_dcache(start, end); start 137 arch/nios2/mm/cacheflush.c __flush_icache(start, end); start 142 arch/nios2/mm/cacheflush.c unsigned long start = (unsigned long) page_address(page); start 143 arch/nios2/mm/cacheflush.c unsigned long end = start + PAGE_SIZE; start 145 arch/nios2/mm/cacheflush.c __flush_dcache(start, end); start 146 arch/nios2/mm/cacheflush.c __flush_icache(start, end); start 152 arch/nios2/mm/cacheflush.c unsigned long start = vmaddr; start 153 arch/nios2/mm/cacheflush.c unsigned long end = start + PAGE_SIZE; start 155 arch/nios2/mm/cacheflush.c __flush_dcache(start, end); start 157 arch/nios2/mm/cacheflush.c __flush_icache(start, end); start 167 arch/nios2/mm/cacheflush.c unsigned long start = (unsigned long)page_address(page); start 169 arch/nios2/mm/cacheflush.c __flush_dcache(start, start + PAGE_SIZE); start 191 arch/nios2/mm/cacheflush.c unsigned long start = (unsigned long)page_address(page); start 193 arch/nios2/mm/cacheflush.c flush_icache_range(start, start + PAGE_SIZE); start 65 arch/nios2/mm/dma-mapping.c unsigned long start = (unsigned long)page_address(page); start 67 arch/nios2/mm/dma-mapping.c flush_dcache_range(start, start + size); start 103 arch/nios2/mm/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 108 arch/nios2/mm/tlb.c while (start < end) { start 109 arch/nios2/mm/tlb.c flush_tlb_one_pid(start, mmu_pid); start 110 arch/nios2/mm/tlb.c start += PAGE_SIZE; start 160 arch/nios2/mm/tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 162 arch/nios2/mm/tlb.c 
while (start < end) { start 163 arch/nios2/mm/tlb.c flush_tlb_one(start); start 164 arch/nios2/mm/tlb.c start += PAGE_SIZE; start 72 arch/openrisc/include/asm/cacheflush.h #define flush_cache_range(vma, start, end) do { } while (0) start 76 arch/openrisc/include/asm/cacheflush.h #define flush_icache_range(start, end) do { } while (0) start 79 arch/openrisc/include/asm/cacheflush.h #define flush_cache_vmap(start, end) do { } while (0) start 80 arch/openrisc/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) do { } while (0) start 37 arch/openrisc/include/asm/tlbflush.h unsigned long start, start 49 arch/openrisc/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 58 arch/openrisc/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, start 61 arch/openrisc/include/asm/tlbflush.h flush_tlb_range(NULL, start, end); start 402 arch/openrisc/kernel/setup.c .start = c_start, start 242 arch/openrisc/kernel/smp.c unsigned long start, unsigned long end) start 31 arch/openrisc/lib/delay.c cycles_t start = get_cycles(); start 33 arch/openrisc/lib/delay.c while ((get_cycles() - start) < cycles) start 98 arch/openrisc/mm/tlb.c unsigned long start, unsigned long end) start 107 arch/openrisc/mm/tlb.c for (addr = start; addr < end; addr += PAGE_SIZE) { start 219 arch/parisc/boot/compressed/misc.c static void flush_data_cache(char *start, unsigned long length) start 221 arch/parisc/boot/compressed/misc.c char *end = start + length; start 224 arch/parisc/boot/compressed/misc.c asm volatile("fdc 0(%0)" : : "r" (start)); start 225 arch/parisc/boot/compressed/misc.c asm volatile("fic 0(%%sr0,%0)" : : "r" (start)); start 226 arch/parisc/boot/compressed/misc.c start += 16; start 227 arch/parisc/boot/compressed/misc.c } while (start < end); start 32 arch/parisc/include/asm/alternative.h void apply_alternatives(struct alt_instr *start, struct alt_instr *end, start 46 arch/parisc/include/asm/cacheflush.h #define flush_kernel_dcache_range(start,size) \ start 47 arch/parisc/include/asm/cacheflush.h flush_kernel_dcache_range_asm((start), (start)+(size)); start 52 arch/parisc/include/asm/cacheflush.h #define flush_cache_vmap(start, end) flush_cache_all() start 53 arch/parisc/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) flush_cache_all() start 86 arch/parisc/include/asm/cacheflush.h unsigned long start, unsigned long end); start 17 arch/parisc/include/asm/tlbflush.h unsigned long start, unsigned long end); start 19 arch/parisc/include/asm/tlbflush.h #define flush_tlb_range(vma, start, end) \ start 20 arch/parisc/include/asm/tlbflush.h __flush_tlb_range((vma)->vm_mm->context, start, end) start 22 arch/parisc/include/asm/tlbflush.h #define flush_tlb_kernel_range(start, end) \ start 23 arch/parisc/include/asm/tlbflush.h __flush_tlb_range(0, start, end) start 52 arch/parisc/include/asm/unwind.h unsigned long start; start 70 arch/parisc/include/asm/unwind.h unsigned long gp, void *start, void *end); start 22 arch/parisc/kernel/alternative.c void __init_or_module apply_alternatives(struct alt_instr *start, start 29 arch/parisc/kernel/alternative.c for (entry = start; entry < end; entry++, index++) { start 392 arch/parisc/kernel/cache.c unsigned long size, start; start 427 arch/parisc/kernel/cache.c start = (unsigned long) _text; start 429 arch/parisc/kernel/cache.c while (start < (unsigned long) _end) { start 430 arch/parisc/kernel/cache.c flush_tlb_kernel_range(start, start + PAGE_SIZE); start 431 
arch/parisc/kernel/cache.c start += PAGE_SIZE; start 487 arch/parisc/kernel/cache.c int __flush_tlb_range(unsigned long sid, unsigned long start, start 493 arch/parisc/kernel/cache.c end - start >= parisc_tlb_flush_threshold) { start 501 arch/parisc/kernel/cache.c while (start < end) { start 504 arch/parisc/kernel/cache.c pdtlb(start); start 505 arch/parisc/kernel/cache.c pitlb(start); start 507 arch/parisc/kernel/cache.c start += PAGE_SIZE; start 596 arch/parisc/kernel/cache.c unsigned long start, unsigned long end) start 602 arch/parisc/kernel/cache.c end - start >= parisc_cache_flush_threshold) { start 604 arch/parisc/kernel/cache.c flush_tlb_range(vma, start, end); start 610 arch/parisc/kernel/cache.c flush_user_dcache_range_asm(start, end); start 612 arch/parisc/kernel/cache.c flush_user_icache_range_asm(start, end); start 613 arch/parisc/kernel/cache.c flush_tlb_range(vma, start, end); start 650 arch/parisc/kernel/cache.c unsigned long start = (unsigned long)vaddr; start 651 arch/parisc/kernel/cache.c unsigned long end = start + size; start 655 arch/parisc/kernel/cache.c flush_tlb_kernel_range(start, end); start 660 arch/parisc/kernel/cache.c flush_kernel_dcache_range_asm(start, end); start 661 arch/parisc/kernel/cache.c flush_tlb_kernel_range(start, end); start 667 arch/parisc/kernel/cache.c unsigned long start = (unsigned long)vaddr; start 668 arch/parisc/kernel/cache.c unsigned long end = start + size; start 672 arch/parisc/kernel/cache.c flush_tlb_kernel_range(start, end); start 677 arch/parisc/kernel/cache.c purge_kernel_dcache_range_asm(start, end); start 678 arch/parisc/kernel/cache.c flush_tlb_kernel_range(start, end); start 238 arch/parisc/kernel/drivers.c if (pdev->hpa.start == d->hpa) { start 524 arch/parisc/kernel/drivers.c dev->hpa.start = hpa; start 798 arch/parisc/kernel/drivers.c ((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \ start 807 arch/parisc/kernel/drivers.c #define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW) start 808 arch/parisc/kernel/drivers.c #define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH) start 893 arch/parisc/kernel/drivers.c ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, start 967 arch/parisc/kernel/drivers.c unsigned long hpa = dev->hpa.start; start 992 arch/parisc/kernel/drivers.c unsigned long hpa = dev->hpa.start; start 129 arch/parisc/kernel/inventory.c set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start, start 139 arch/parisc/kernel/inventory.c if (unlikely( ((start & (PAGE_SIZE - 1)) != 0) start 145 arch/parisc/kernel/inventory.c pmem_ptr->start_pfn = (start >> PAGE_SHIFT); start 11 arch/parisc/kernel/kexec.c unsigned long start, start 37 arch/parisc/kernel/kexec.c pr_debug(" start: %lx\n", kimage->start); start 75 arch/parisc/kernel/kexec.c unsigned long start, start 105 arch/parisc/kernel/kexec.c reloc(image->head & PAGE_MASK, image->start, phys); start 36 arch/parisc/kernel/kexec_file.c image->start = __pa(elf_info.ehdr->e_entry); start 42 arch/parisc/kernel/kexec_file.c kernel_load_addr, image->start); start 63 arch/parisc/kernel/patch.c unsigned long start = (unsigned long)addr; start 71 arch/parisc/kernel/patch.c flush_icache_range(start, end); start 96 arch/parisc/kernel/patch.c flush_icache_range(start, end); start 201 arch/parisc/kernel/pci.c resource_size_t mask, align, start = res->start; start 205 arch/parisc/kernel/pci.c res->parent, res->start, res->end, start 213 arch/parisc/kernel/pci.c start += 
mask; start 214 arch/parisc/kernel/pci.c start &= ~mask; start 216 arch/parisc/kernel/pci.c return start; start 795 arch/parisc/kernel/perf.c runway = ioremap_nocache(cpu_device->hpa.start, 4096); start 102 arch/parisc/kernel/processor.c txn_addr = dev->hpa.start; /* for legacy PDC */ start 129 arch/parisc/kernel/processor.c status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start); start 135 arch/parisc/kernel/processor.c &dev->hpa.start); start 148 arch/parisc/kernel/processor.c &dev->hpa.start, cpu_info.cpu_num, NR_CPUS); start 168 arch/parisc/kernel/processor.c p->hpa = dev->hpa.start; /* save CPU hpa */ start 191 arch/parisc/kernel/setup.c .start = c_start, start 241 arch/parisc/kernel/setup.c .start = F_EXTEND(0xfff80000), start 248 arch/parisc/kernel/setup.c .start = F_EXTEND(0xfffb0000), start 255 arch/parisc/kernel/setup.c .start = F_EXTEND(0xfffe0000), start 234 arch/parisc/kernel/signal.c unsigned long start, end; start 301 arch/parisc/kernel/signal.c start = (unsigned long) &frame->tramp[0]; start 303 arch/parisc/kernel/signal.c flush_user_dcache_range_asm(start, end); start 304 arch/parisc/kernel/signal.c flush_user_icache_range_asm(start, end); start 523 arch/parisc/kernel/signal.c unsigned long start = (unsigned long) &usp[2]; start 550 arch/parisc/kernel/signal.c flush_user_dcache_range_asm(start, end); start 551 arch/parisc/kernel/signal.c flush_user_icache_range_asm(start, end); start 75 arch/parisc/kernel/unwind.c if (addr >= kernel_unwind_table.start && start 83 arch/parisc/kernel/unwind.c if (addr >= table->start && start 103 arch/parisc/kernel/unwind.c struct unwind_table_entry *start = table_start; start 110 arch/parisc/kernel/unwind.c table->start = base_addr + start->region_start; start 113 arch/parisc/kernel/unwind.c table->length = end - start + 1; start 116 arch/parisc/kernel/unwind.c for (; start <= end; start++) { start 117 arch/parisc/kernel/unwind.c if (start < end && start 118 arch/parisc/kernel/unwind.c start->region_end > (start+1)->region_start) { start 120 arch/parisc/kernel/unwind.c start, start+1); start 123 arch/parisc/kernel/unwind.c start->region_start += base_addr; start 124 arch/parisc/kernel/unwind.c start->region_end += base_addr; start 135 arch/parisc/kernel/unwind.c unwind_table_sort(struct unwind_table_entry *start, start 138 arch/parisc/kernel/unwind.c sort(start, finish - start, sizeof(struct unwind_table_entry), start 145 arch/parisc/kernel/unwind.c void *start, void *end) start 149 arch/parisc/kernel/unwind.c struct unwind_table_entry *s = (struct unwind_table_entry *)start; start 157 arch/parisc/kernel/unwind.c unwind_table_init(table, name, base_addr, gp, start, end); start 179 arch/parisc/kernel/unwind.c long start, stop; start 182 arch/parisc/kernel/unwind.c start = (long)&__start___unwind[0]; start 186 arch/parisc/kernel/unwind.c start, stop, start 187 arch/parisc/kernel/unwind.c (stop - start) / sizeof(struct unwind_table_entry)); start 40 arch/parisc/math-emu/fpbits.h #define Bitfield_extract(start, length, object) \ start 41 arch/parisc/math-emu/fpbits.h ((object) >> (HOSTWDSZ - (start) - (length)) & \ start 44 arch/parisc/math-emu/fpbits.h #define Bitfield_signed_extract(start, length, object) \ start 45 arch/parisc/math-emu/fpbits.h ((int)((object) << start) >> (HOSTWDSZ - (length))) start 47 arch/parisc/math-emu/fpbits.h #define Bitfield_mask(start, len, object) \ start 48 arch/parisc/math-emu/fpbits.h ((object) & (((unsigned)-1 >> (HOSTWDSZ-len)) << (HOSTWDSZ-start-len))) start 50 arch/parisc/math-emu/fpbits.h #define 
Bitfield_deposit(value,start,len,object) object = \ start 51 arch/parisc/math-emu/fpbits.h ((object) & ~(((unsigned)-1 >> (HOSTWDSZ-len)) << (HOSTWDSZ-start-len))) | \ start 52 arch/parisc/math-emu/fpbits.h (((value) & ((unsigned)-1 >> (HOSTWDSZ-len))) << (HOSTWDSZ-start-len)) start 64 arch/parisc/mm/init.c .start = 0, start 175 arch/parisc/mm/init.c unsigned long start; start 179 arch/parisc/mm/init.c start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); start 181 arch/parisc/mm/init.c i, start, start + (size - 1), size >> 20); start 185 arch/parisc/mm/init.c res->start = start; start 186 arch/parisc/mm/init.c res->end = start + size - 1; start 258 arch/parisc/mm/init.c unsigned long start; start 264 arch/parisc/mm/init.c start = start_pfn << PAGE_SHIFT; start 268 arch/parisc/mm/init.c memblock_add(start, size); start 326 arch/parisc/mm/init.c data_resource.start = virt_to_phys(&data_start); start 328 arch/parisc/mm/init.c code_resource.start = virt_to_phys(_text); start 477 arch/parisc/mm/init.c unsigned long start = (unsigned long) __init_begin; start 480 arch/parisc/mm/init.c map_pages(start, __pa(start), end-start, start 710 arch/parisc/mm/init.c unsigned long start = pmem_ranges[i].start_pfn; start 712 arch/parisc/mm/init.c unsigned long end = start + size; start 714 arch/parisc/mm/init.c if (mem_start_pfn > start) start 715 arch/parisc/mm/init.c mem_start_pfn = start; start 17 arch/powerpc/boot/devtree.c void dt_fixup_memory(u64 start, u64 size) start 36 arch/powerpc/boot/devtree.c memreg[i++] = start >> 32; start 37 arch/powerpc/boot/devtree.c memreg[i++] = start & 0xffffffff; start 177 arch/powerpc/boot/oflib.c unsigned long start = (unsigned long)_start, end = (unsigned long)_end; start 185 arch/powerpc/boot/oflib.c addr = (unsigned long) of_claim(start, end - start, 0); start 187 arch/powerpc/boot/oflib.c start, end, end - start, addr); start 85 arch/powerpc/boot/ops.h void start(void); start 194 arch/powerpc/boot/ops.h void dt_fixup_memory(u64 start, u64 size); start 12 arch/powerpc/include/asm/book3s/32/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 14 arch/powerpc/include/asm/book3s/32/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 248 arch/powerpc/include/asm/book3s/64/hash.h extern int __meminit hash__vmemmap_create_mapping(unsigned long start, start 251 arch/powerpc/include/asm/book3s/64/hash.h extern void hash__vmemmap_remove_mapping(unsigned long start, start 254 arch/powerpc/include/asm/book3s/64/hash.h int hash__create_section_mapping(unsigned long start, unsigned long end, int nid); start 255 arch/powerpc/include/asm/book3s/64/hash.h int hash__remove_section_mapping(unsigned long start, unsigned long end); start 1049 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int __meminit vmemmap_create_mapping(unsigned long start, start 1054 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__vmemmap_create_mapping(start, page_size, phys); start 1055 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__vmemmap_create_mapping(start, page_size, phys); start 1059 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void vmemmap_remove_mapping(unsigned long start, start 1063 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__vmemmap_remove_mapping(start, page_size); start 1064 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__vmemmap_remove_mapping(start, page_size); start 271 arch/powerpc/include/asm/book3s/64/radix.h extern int __meminit 
radix__vmemmap_create_mapping(unsigned long start, start 274 arch/powerpc/include/asm/book3s/64/radix.h extern void radix__vmemmap_remove_mapping(unsigned long start, start 297 arch/powerpc/include/asm/book3s/64/radix.h int radix__create_section_mapping(unsigned long start, unsigned long end, int nid); start 298 arch/powerpc/include/asm/book3s/64/radix.h int radix__remove_section_mapping(unsigned long start, unsigned long end); start 103 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h unsigned long start, unsigned long end) start 107 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__flush_tlb_kernel_range(unsigned long start, start 116 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, start 45 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h unsigned long start, unsigned long end); start 46 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, start 49 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h unsigned long start, unsigned long end); start 50 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 52 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end); start 51 arch/powerpc/include/asm/book3s/64/tlbflush.h unsigned long start, unsigned long end) start 54 arch/powerpc/include/asm/book3s/64/tlbflush.h return radix__flush_pmd_tlb_range(vma, start, end); start 55 arch/powerpc/include/asm/book3s/64/tlbflush.h return hash__flush_tlb_range(vma, start, end); start 60 arch/powerpc/include/asm/book3s/64/tlbflush.h unsigned long start, start 64 arch/powerpc/include/asm/book3s/64/tlbflush.h return radix__flush_hugetlb_tlb_range(vma, start, end); start 65 arch/powerpc/include/asm/book3s/64/tlbflush.h return hash__flush_tlb_range(vma, start, end); start 69 arch/powerpc/include/asm/book3s/64/tlbflush.h unsigned long start, unsigned long end) start 72 arch/powerpc/include/asm/book3s/64/tlbflush.h return radix__flush_tlb_range(vma, start, end); start 73 arch/powerpc/include/asm/book3s/64/tlbflush.h return hash__flush_tlb_range(vma, start, end); start 76 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, start 80 arch/powerpc/include/asm/book3s/64/tlbflush.h return radix__flush_tlb_kernel_range(start, end); start 81 arch/powerpc/include/asm/book3s/64/tlbflush.h return hash__flush_tlb_kernel_range(start, end); start 19 arch/powerpc/include/asm/cacheflush.h #define flush_cache_range(vma, start, end) do { } while (0) start 22 arch/powerpc/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) do { } while (0) start 32 arch/powerpc/include/asm/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) start 37 arch/powerpc/include/asm/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } start 45 arch/powerpc/include/asm/cacheflush.h void flush_icache_range(unsigned long start, unsigned long stop); start 60 arch/powerpc/include/asm/cacheflush.h static inline void flush_dcache_range(unsigned long start, unsigned long stop) start 64 arch/powerpc/include/asm/cacheflush.h void *addr = (void *)(start & ~(bytes - 1)); start 86 arch/powerpc/include/asm/cacheflush.h static inline void clean_dcache_range(unsigned long 
start, unsigned long stop) start 90 arch/powerpc/include/asm/cacheflush.h void *addr = (void *)(start & ~(bytes - 1)); start 104 arch/powerpc/include/asm/cacheflush.h static inline void invalidate_dcache_range(unsigned long start, start 109 arch/powerpc/include/asm/cacheflush.h void *addr = (void *)(start & ~(bytes - 1)); start 29 arch/powerpc/include/asm/drmem.h #define for_each_drmem_lmb_in_range(lmb, start, end) \ start 30 arch/powerpc/include/asm/drmem.h for ((lmb) = (start); (lmb) < (end); (lmb)++) start 60 arch/powerpc/include/asm/head-64.h #define OPEN_FIXED_SECTION(sname, start, end) \ start 61 arch/powerpc/include/asm/head-64.h sname##_start = (start); \ start 63 arch/powerpc/include/asm/head-64.h sname##_len = (end) - (start); \ start 76 arch/powerpc/include/asm/head-64.h #define OPEN_TEXT_SECTION(start) \ start 80 arch/powerpc/include/asm/head-64.h text_start = (start) + 0x100; \ start 85 arch/powerpc/include/asm/head-64.h #define OPEN_TEXT_SECTION(start) \ start 86 arch/powerpc/include/asm/head-64.h text_start = (start); \ start 92 arch/powerpc/include/asm/head-64.h #define ZERO_FIXED_SECTION(sname, start, end) \ start 93 arch/powerpc/include/asm/head-64.h sname##_start = (start); \ start 95 arch/powerpc/include/asm/head-64.h sname##_len = (end) - (start); \ start 125 arch/powerpc/include/asm/head-64.h #define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start, size) \ start 127 arch/powerpc/include/asm/head-64.h name##_start = (start); \ start 128 arch/powerpc/include/asm/head-64.h .if ((start) % (size) != 0); \ start 134 arch/powerpc/include/asm/head-64.h .if (start) < sname##_start; \ start 138 arch/powerpc/include/asm/head-64.h . = (start) - sname##_start; \ start 142 arch/powerpc/include/asm/head-64.h #define FIXED_SECTION_ENTRY_END_LOCATION(sname, name, start, size) \ start 143 arch/powerpc/include/asm/head-64.h .if (start) + (size) > sname##_end; \ start 147 arch/powerpc/include/asm/head-64.h .if (. - name > (start) + (size) - name##_start); \ start 151 arch/powerpc/include/asm/head-64.h . 
= ((start) + (size) - sname##_start); \ start 87 arch/powerpc/include/asm/iommu.h unsigned long start; start 88 arch/powerpc/include/asm/kexec.h extern int overlaps_crashkernel(unsigned long start, unsigned long size); start 118 arch/powerpc/include/asm/kexec.h int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size); start 124 arch/powerpc/include/asm/kexec.h static inline int overlaps_crashkernel(unsigned long start, unsigned long size) start 61 arch/powerpc/include/asm/kvm_host.h unsigned long start, unsigned long end); start 62 arch/powerpc/include/asm/kvm_host.h extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); start 289 arch/powerpc/include/asm/kvm_ppc.h int (*unmap_hva_range)(struct kvm *kvm, unsigned long start, start 291 arch/powerpc/include/asm/kvm_ppc.h int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end); start 67 arch/powerpc/include/asm/macio.h return dev->resource[resource_no].start; start 78 arch/powerpc/include/asm/macio.h if (res->start == 0 || res->end == 0 || res->end < res->start) start 97 arch/powerpc/include/asm/macio.h return dev->interrupt[irq_no].start; start 235 arch/powerpc/include/asm/mmu_context.h unsigned long start, unsigned long end) start 237 arch/powerpc/include/asm/mmu_context.h if (start <= mm->context.vdso_base && mm->context.vdso_base < end) start 359 arch/powerpc/include/asm/nohash/64/pgtable.h extern int __meminit vmemmap_create_mapping(unsigned long start, start 362 arch/powerpc/include/asm/nohash/64/pgtable.h extern void vmemmap_remove_mapping(unsigned long start, start 32 arch/powerpc/include/asm/nohash/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 34 arch/powerpc/include/asm/nohash/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 204 arch/powerpc/include/asm/opal.h int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end); start 48 arch/powerpc/include/asm/oprofile_impl.h int (*start) (struct op_counter_config *); start 32 arch/powerpc/include/asm/ppc-pci.h void *pci_traverse_device_nodes(struct device_node *start, start 19 arch/powerpc/include/asm/ps3stor.h u64 start; start 21 arch/powerpc/include/asm/rheap.h unsigned long start; start 41 arch/powerpc/include/asm/rheap.h unsigned long start; start 60 arch/powerpc/include/asm/rheap.h extern int rh_attach_region(rh_info_t * info, unsigned long start, int size); start 63 arch/powerpc/include/asm/rheap.h extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size); start 73 arch/powerpc/include/asm/rheap.h extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, start 77 arch/powerpc/include/asm/rheap.h extern int rh_free(rh_info_t * info, unsigned long start); start 90 arch/powerpc/include/asm/rheap.h extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner); start 61 arch/powerpc/include/asm/sections.h static inline int overlaps_interrupt_vector_text(unsigned long start, start 68 arch/powerpc/include/asm/sections.h return start < (unsigned long)__va(real_end) && start 72 arch/powerpc/include/asm/sections.h static inline int overlaps_kernel_text(unsigned long start, unsigned long end) start 74 arch/powerpc/include/asm/sections.h return start < (unsigned long)__init_end && start 65 arch/powerpc/include/asm/setup.h void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); start 67 arch/powerpc/include/asm/setup.h static inline void 
do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; start 29 arch/powerpc/include/asm/slice.h void slice_set_range_psize(struct mm_struct *mm, unsigned long start, start 16 arch/powerpc/include/asm/sparsemem.h extern int create_section_mapping(unsigned long start, unsigned long end, int nid); start 17 arch/powerpc/include/asm/sparsemem.h extern int remove_section_mapping(unsigned long start, unsigned long end); start 1944 arch/powerpc/kernel/eeh.c if (!r->flags || !r->start) start 2011 arch/powerpc/kernel/eeh.c mapped = ioremap(bar->start, PAGE_SIZE); start 191 arch/powerpc/kernel/eeh_cache.c resource_size_t start = pci_resource_start(dev,i); start 198 arch/powerpc/kernel/eeh_cache.c if (start == 0 || ~start == 0 || end == 0 || ~end == 0) start 200 arch/powerpc/kernel/eeh_cache.c eeh_addr_cache_insert(dev, start, end, flags); start 169 arch/powerpc/kernel/fadump.c u64 start, end; start 172 arch/powerpc/kernel/fadump.c start = max_t(u64, d_start, reg->base); start 176 arch/powerpc/kernel/fadump.c if (start > d_start) start 768 arch/powerpc/kernel/fadump.c u64 start, size; start 778 arch/powerpc/kernel/fadump.c start = mem_ranges[mrange_info->mem_range_cnt - 1].base; start 781 arch/powerpc/kernel/fadump.c if ((start + size) == base) start 797 arch/powerpc/kernel/fadump.c start = base; start 798 arch/powerpc/kernel/fadump.c mem_ranges[mrange_info->mem_range_cnt].base = start; start 802 arch/powerpc/kernel/fadump.c mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start); start 805 arch/powerpc/kernel/fadump.c start, end - 1, (end - start)); start 809 arch/powerpc/kernel/fadump.c static int fadump_exclude_reserved_area(u64 start, u64 end) start 817 arch/powerpc/kernel/fadump.c if ((ra_start < end) && (ra_end > start)) { start 818 arch/powerpc/kernel/fadump.c if ((start < ra_start) && (end > ra_end)) { start 820 arch/powerpc/kernel/fadump.c start, ra_start); start 826 arch/powerpc/kernel/fadump.c } else if (start < ra_start) { start 828 arch/powerpc/kernel/fadump.c start, ra_start); start 834 arch/powerpc/kernel/fadump.c ret = fadump_add_mem_range(&crash_mrange_info, start, end); start 879 arch/powerpc/kernel/fadump.c u64 start, end; start 891 arch/powerpc/kernel/fadump.c start = fw_dump.boot_mem_addr[i]; start 892 arch/powerpc/kernel/fadump.c end = start + fw_dump.boot_mem_sz[i]; start 893 arch/powerpc/kernel/fadump.c ret = fadump_add_mem_range(&crash_mrange_info, start, end); start 899 arch/powerpc/kernel/fadump.c start = (u64)reg->base; start 900 arch/powerpc/kernel/fadump.c end = start + (u64)reg->size; start 906 arch/powerpc/kernel/fadump.c if (start < fw_dump.boot_mem_top) { start 908 arch/powerpc/kernel/fadump.c start = fw_dump.boot_mem_top; start 914 arch/powerpc/kernel/fadump.c ret = fadump_exclude_reserved_area(start, end); start 1133 arch/powerpc/kernel/fadump.c static void fadump_release_reserved_area(u64 start, u64 end) start 1138 arch/powerpc/kernel/fadump.c spfn = PHYS_PFN(start); start 47 arch/powerpc/kernel/io-workarounds.c if (paddr >= res->start && paddr <= res->end) start 170 arch/powerpc/kernel/iommu.c unsigned long n, end, start; start 209 arch/powerpc/kernel/iommu.c (*handle >= pool->start) && (*handle < pool->end)) start 210 arch/powerpc/kernel/iommu.c start = *handle; start 212 arch/powerpc/kernel/iommu.c start = pool->hint; start 220 arch/powerpc/kernel/iommu.c if (start >= limit) start 221 arch/powerpc/kernel/iommu.c start = pool->start; start 229 arch/powerpc/kernel/iommu.c if ((start & mask) >= limit || pass > 0) { start 233 
arch/powerpc/kernel/iommu.c start = pool->start; start 235 arch/powerpc/kernel/iommu.c start &= mask; start 246 arch/powerpc/kernel/iommu.c n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, start 251 arch/powerpc/kernel/iommu.c pool->hint = pool->start; start 261 arch/powerpc/kernel/iommu.c pool->hint = pool->start; start 368 arch/powerpc/kernel/iommu.c unsigned long largepool_start = tbl->large_pool.start; start 716 arch/powerpc/kernel/iommu.c p->start = tbl->poolsize * i; start 717 arch/powerpc/kernel/iommu.c p->hint = p->start; start 718 arch/powerpc/kernel/iommu.c p->end = p->start + tbl->poolsize; start 723 arch/powerpc/kernel/iommu.c p->start = tbl->poolsize * i; start 724 arch/powerpc/kernel/iommu.c p->hint = p->start; start 665 arch/powerpc/kernel/kvm.c u32 *start, *end; start 678 arch/powerpc/kernel/kvm.c start = (void*)_stext; start 688 arch/powerpc/kernel/kvm.c for (p = start; p < end; p++) { start 124 arch/powerpc/kernel/machine_kexec.c crashk_res.start = crash_base; start 128 arch/powerpc/kernel/machine_kexec.c if (crashk_res.end == crashk_res.start) { start 129 arch/powerpc/kernel/machine_kexec.c crashk_res.start = crashk_res.end = 0; start 139 arch/powerpc/kernel/machine_kexec.c if (crashk_res.start != KDUMP_KERNELBASE) start 143 arch/powerpc/kernel/machine_kexec.c crashk_res.start = KDUMP_KERNELBASE; start 145 arch/powerpc/kernel/machine_kexec.c if (!crashk_res.start) { start 152 arch/powerpc/kernel/machine_kexec.c crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); start 154 arch/powerpc/kernel/machine_kexec.c crashk_res.start = KDUMP_KERNELBASE; start 158 arch/powerpc/kernel/machine_kexec.c crash_base = PAGE_ALIGN(crashk_res.start); start 159 arch/powerpc/kernel/machine_kexec.c if (crash_base != crashk_res.start) { start 162 arch/powerpc/kernel/machine_kexec.c crashk_res.start = crash_base; start 167 arch/powerpc/kernel/machine_kexec.c crashk_res.end = crashk_res.start + crash_size - 1; start 173 arch/powerpc/kernel/machine_kexec.c crashk_res.start = crashk_res.end = 0; start 187 arch/powerpc/kernel/machine_kexec.c (unsigned long)(crashk_res.start >> 20), start 190 arch/powerpc/kernel/machine_kexec.c if (!memblock_is_region_memory(crashk_res.start, crash_size) || start 191 arch/powerpc/kernel/machine_kexec.c memblock_reserve(crashk_res.start, crash_size)) { start 193 arch/powerpc/kernel/machine_kexec.c crashk_res.start = crashk_res.end = 0; start 198 arch/powerpc/kernel/machine_kexec.c int overlaps_crashkernel(unsigned long start, unsigned long size) start 200 arch/powerpc/kernel/machine_kexec.c return (start + size) > crashk_res.start && start <= crashk_res.end; start 244 arch/powerpc/kernel/machine_kexec.c if (crashk_res.start != 0) { start 245 arch/powerpc/kernel/machine_kexec.c crashk_base = cpu_to_be_ulong(crashk_res.start), start 59 arch/powerpc/kernel/machine_kexec_32.c relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start); start 63 arch/powerpc/kernel/machine_kexec_32.c (*rnk)(page_list, reboot_code_buffer_phys, image->start); start 291 arch/powerpc/kernel/machine_kexec_64.c extern void kexec_sequence(void *newstack, unsigned long start, start 369 arch/powerpc/kernel/machine_kexec_64.c kexec_sequence(&kexec_stack, image->start, image, start 95 arch/powerpc/kernel/machine_kexec_file_64.c int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size) start 108 arch/powerpc/kernel/machine_kexec_file_64.c if (rsv_start == start && rsv_size == size) { start 460 arch/powerpc/kernel/pci-common.c if (offset < 
(rp->start & PAGE_MASK) || start 499 arch/powerpc/kernel/pci-common.c if (offset < rp->start || (offset + size) > rp->end) start 539 arch/powerpc/kernel/pci-common.c if (offset < rp->start || (offset + size) > rp->end) start 606 arch/powerpc/kernel/pci-common.c if (roffset < rp->start || (roffset + size) > rp->end) start 621 arch/powerpc/kernel/pci-common.c resource_size_t *start, resource_size_t *end) start 628 arch/powerpc/kernel/pci-common.c *start = region.start; start 640 arch/powerpc/kernel/pci-common.c *start = rsrc->start; start 763 arch/powerpc/kernel/pci-common.c res->start = range.cpu_addr; start 820 arch/powerpc/kernel/pci-common.c (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { start 825 arch/powerpc/kernel/pci-common.c res->end -= res->start; start 826 arch/powerpc/kernel/pci-common.c res->start = 0; start 864 arch/powerpc/kernel/pci-common.c if (region.start != 0) start 880 arch/powerpc/kernel/pci-common.c hose->mem_resources[i].start == hose->mem_offset[i]) start 891 arch/powerpc/kernel/pci-common.c if (((res->start - offset) & 0xfffffffful) != 0) start 930 arch/powerpc/kernel/pci-common.c res->start = 0; start 1078 arch/powerpc/kernel/pci-common.c resource_size_t start = res->start; start 1082 arch/powerpc/kernel/pci-common.c return start; start 1083 arch/powerpc/kernel/pci-common.c if (start & 0x300) start 1084 arch/powerpc/kernel/pci-common.c start = (start + 0x3ff) & ~0x3ff; start 1087 arch/powerpc/kernel/pci-common.c return start; start 1102 arch/powerpc/kernel/pci-common.c if (p->end < res->start) start 1104 arch/powerpc/kernel/pci-common.c if (res->end < p->start) start 1106 arch/powerpc/kernel/pci-common.c if (p->start < res->start || p->end > res->end) start 1169 arch/powerpc/kernel/pci-common.c if (!res || !res->flags || res->start > res->end || res->parent) start 1221 arch/powerpc/kernel/pci-common.c res->start = 0; start 1246 arch/powerpc/kernel/pci-common.c r->end -= r->start; start 1247 arch/powerpc/kernel/pci-common.c r->start = 0; start 1315 arch/powerpc/kernel/pci-common.c res->start = offset; start 1333 arch/powerpc/kernel/pci-common.c if ((pres->start - offset) <= 0xa0000 && start 1343 arch/powerpc/kernel/pci-common.c res->start = 0xa0000 + offset; start 1400 arch/powerpc/kernel/pci-common.c if (r->parent || !r->start || !r->flags) start 1607 arch/powerpc/kernel/pci-common.c hose->busn.start = hose->first_busno; start 1664 arch/powerpc/kernel/pci-common.c dev->resource[i].start = 0; start 138 arch/powerpc/kernel/pci-hotplug.c max = bus->busn_res.start; start 64 arch/powerpc/kernel/pci_32.c dev->resource[0].start = dev->resource[0].end = 0; start 66 arch/powerpc/kernel/pci_32.c dev->resource[1].start = dev->resource[1].end = 0; start 230 arch/powerpc/kernel/pci_32.c res->start += io_offset; start 103 arch/powerpc/kernel/pci_64.c __flush_hash_table_range(&init_mm, res->start + _IO_BASE, start 169 arch/powerpc/kernel/pci_64.c hose->io_resource.start += io_virt_offset; start 188 arch/powerpc/kernel/pci_64.c bus->resource[0]->start + _IO_BASE, start 396 arch/powerpc/kernel/pci_dn.c void *pci_traverse_device_nodes(struct device_node *start, start 404 arch/powerpc/kernel/pci_dn.c for (dn = start->child; dn; dn = nextdn) { start 431 arch/powerpc/kernel/pci_dn.c if (dn == start) start 163 arch/powerpc/kernel/pci_of_scan.c region.start = base; start 326 arch/powerpc/kernel/pci_of_scan.c region.start = of_read_number(&ranges[1], 2); start 327 arch/powerpc/kernel/pci_of_scan.c region.end = region.start + size - 1; start 1685 arch/powerpc/kernel/process.c void 
preload_new_slb_context(unsigned long start, unsigned long sp); start 1690 arch/powerpc/kernel/process.c void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) start 1697 arch/powerpc/kernel/process.c preload_new_slb_context(start, sp); start 1736 arch/powerpc/kernel/process.c regs->nip = start; start 1744 arch/powerpc/kernel/process.c entry = start; start 1754 arch/powerpc/kernel/process.c regs->gpr[12] = start; start 1766 arch/powerpc/kernel/process.c __get_user(entry, (unsigned long __user *)start); start 1767 arch/powerpc/kernel/process.c __get_user(toc, (unsigned long __user *)start+1); start 1781 arch/powerpc/kernel/process.c regs->nip = start; start 93 arch/powerpc/kernel/prom.c static inline int overlaps_initrd(unsigned long start, unsigned long size) start 99 arch/powerpc/kernel/prom.c return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) && start 100 arch/powerpc/kernel/prom.c start <= _ALIGN_UP(initrd_end, PAGE_SIZE); start 115 arch/powerpc/kernel/prom.c unsigned long start, size; start 120 arch/powerpc/kernel/prom.c start = __pa(initial_boot_params); start 123 arch/powerpc/kernel/prom.c if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || start 124 arch/powerpc/kernel/prom.c !memblock_is_memory(start + size - 1) || start 125 arch/powerpc/kernel/prom.c overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) { start 431 arch/powerpc/kernel/prom.c crashk_res.start = *lprop; start 435 arch/powerpc/kernel/prom.c crashk_res.end = crashk_res.start + *lprop - 1; start 571 arch/powerpc/kernel/ptrace.c int start, end; start 580 arch/powerpc/kernel/ptrace.c start = 33 * sizeof(vector128); start 581 arch/powerpc/kernel/ptrace.c end = start + sizeof(vrsave); start 583 arch/powerpc/kernel/ptrace.c start, end); start 621 arch/powerpc/kernel/ptrace.c int start, end; start 630 arch/powerpc/kernel/ptrace.c start = 33 * sizeof(vector128); start 631 arch/powerpc/kernel/ptrace.c end = start + sizeof(vrsave); start 633 arch/powerpc/kernel/ptrace.c start, end); start 166 arch/powerpc/kernel/rtas_pci.c chip_regs = ioremap(registers.start & ~(0xfffffUL), 0x100000); start 207 arch/powerpc/kernel/rtas_pci.c return r.start; start 366 arch/powerpc/kernel/setup-common.c .start = c_start, start 447 arch/powerpc/kernel/time.c unsigned long start; start 452 arch/powerpc/kernel/time.c start = get_rtcl(); start 455 arch/powerpc/kernel/time.c diff = get_rtcl() - start; start 468 arch/powerpc/kernel/time.c start = get_tbl(); start 469 arch/powerpc/kernel/time.c while (get_tbl() - start < loops) start 530 arch/powerpc/kernel/vdso.c void *start; start 533 arch/powerpc/kernel/vdso.c start = find_section64(v64->hdr, "__ftr_fixup", &size); start 534 arch/powerpc/kernel/vdso.c if (start) start 536 arch/powerpc/kernel/vdso.c start, start + size); start 538 arch/powerpc/kernel/vdso.c start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size); start 539 arch/powerpc/kernel/vdso.c if (start) start 541 arch/powerpc/kernel/vdso.c start, start + size); start 543 arch/powerpc/kernel/vdso.c start = find_section64(v64->hdr, "__fw_ftr_fixup", &size); start 544 arch/powerpc/kernel/vdso.c if (start) start 546 arch/powerpc/kernel/vdso.c start, start + size); start 548 arch/powerpc/kernel/vdso.c start = find_section64(v64->hdr, "__lwsync_fixup", &size); start 549 arch/powerpc/kernel/vdso.c if (start) start 551 arch/powerpc/kernel/vdso.c start, start + size); start 555 arch/powerpc/kernel/vdso.c start = find_section32(v32->hdr, "__ftr_fixup", &size); start 556 
arch/powerpc/kernel/vdso.c if (start) start 558 arch/powerpc/kernel/vdso.c start, start + size); start 560 arch/powerpc/kernel/vdso.c start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size); start 561 arch/powerpc/kernel/vdso.c if (start) start 563 arch/powerpc/kernel/vdso.c start, start + size); start 566 arch/powerpc/kernel/vdso.c start = find_section32(v32->hdr, "__fw_ftr_fixup", &size); start 567 arch/powerpc/kernel/vdso.c if (start) start 569 arch/powerpc/kernel/vdso.c start, start + size); start 572 arch/powerpc/kernel/vdso.c start = find_section32(v32->hdr, "__lwsync_fixup", &size); start 573 arch/powerpc/kernel/vdso.c if (start) start 575 arch/powerpc/kernel/vdso.c start, start + size); start 870 arch/powerpc/kvm/book3s.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) start 872 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); start 875 arch/powerpc/kvm/book3s.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) start 877 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->age_hva(kvm, start, end); start 12 arch/powerpc/kvm/book3s.h extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, start 14 arch/powerpc/kvm/book3s.h extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, start 770 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long start, start 784 arch/powerpc/kvm/book3s_64_mmu_hv.c hva_start = max(start, memslot->userspace_addr); start 891 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) start 896 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm_handle_hva_range(kvm, start, end, handler); start 982 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end) start 987 arch/powerpc/kvm/book3s_64_mmu_hv.c return kvm_handle_hva_range(kvm, start, end, handler); start 81 arch/powerpc/kvm/book3s_hv_rm_xics.c static inline int grab_next_hostcore(int start, start 88 arch/powerpc/kvm/book3s_hv_rm_xics.c for (core = start + 1; core < max; core++) { start 392 arch/powerpc/kvm/book3s_pr.c static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start, start 405 arch/powerpc/kvm/book3s_pr.c hva_start = max(start, memslot->userspace_addr); start 422 arch/powerpc/kvm/book3s_pr.c static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start, start 425 arch/powerpc/kvm/book3s_pr.c do_kvm_unmap_hva(kvm, start, end); start 430 arch/powerpc/kvm/book3s_pr.c static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start, start 182 arch/powerpc/kvm/e500_mmu.c unsigned long start, end, size; start 185 arch/powerpc/kvm/e500_mmu.c start = get_tlb_eaddr(gtlbe) & ~(size - 1); start 186 arch/powerpc/kvm/e500_mmu.c end = start + size - 1; start 188 arch/powerpc/kvm/e500_mmu.c return vcpu_e500->tlb1_min_eaddr == start || start 196 arch/powerpc/kvm/e500_mmu.c unsigned long start, end, size; start 203 arch/powerpc/kvm/e500_mmu.c start = get_tlb_eaddr(gtlbe) & ~(size - 1); start 204 arch/powerpc/kvm/e500_mmu.c end = start + size - 1; start 206 arch/powerpc/kvm/e500_mmu.c vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); start 370 arch/powerpc/kvm/e500_mmu_host.c unsigned long start, end; start 375 arch/powerpc/kvm/e500_mmu_host.c start = vma->vm_pgoff; start 376 arch/powerpc/kvm/e500_mmu_host.c end = start + start 379 arch/powerpc/kvm/e500_mmu_host.c pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); start 384 arch/powerpc/kvm/e500_mmu_host.c if 
(start < slot_start) start 385 arch/powerpc/kvm/e500_mmu_host.c start = slot_start; start 412 arch/powerpc/kvm/e500_mmu_host.c if (gfn_start + pfn - gfn < start) start 737 arch/powerpc/kvm/e500_mmu_host.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) start 740 arch/powerpc/kvm/e500_mmu_host.c kvm_unmap_hva(kvm, start); start 745 arch/powerpc/kvm/e500_mmu_host.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) start 69 arch/powerpc/lib/feature-fixups.c unsigned int *start, *end, *alt_start, *alt_end, *src, *dest; start 71 arch/powerpc/lib/feature-fixups.c start = calc_addr(fcur, fcur->start_off); start 76 arch/powerpc/lib/feature-fixups.c if ((alt_end - alt_start) > (end - start)) start 83 arch/powerpc/lib/feature-fixups.c dest = start; start 120 arch/powerpc/lib/feature-fixups.c long *start, *end; start 123 arch/powerpc/lib/feature-fixups.c start = PTRRELOC(&__start___stf_entry_barrier_fixup), start 143 arch/powerpc/lib/feature-fixups.c for (i = 0; start < end; start++, i++) { start 144 arch/powerpc/lib/feature-fixups.c dest = (void *)start + *start; start 170 arch/powerpc/lib/feature-fixups.c long *start, *end; start 173 arch/powerpc/lib/feature-fixups.c start = PTRRELOC(&__start___stf_exit_barrier_fixup), start 204 arch/powerpc/lib/feature-fixups.c for (i = 0; start < end; start++, i++) { start 205 arch/powerpc/lib/feature-fixups.c dest = (void *)start + *start; start 234 arch/powerpc/lib/feature-fixups.c long *start, *end; start 237 arch/powerpc/lib/feature-fixups.c start = PTRRELOC(&__start___rfi_flush_fixup), start 257 arch/powerpc/lib/feature-fixups.c for (i = 0; start < end; start++, i++) { start 258 arch/powerpc/lib/feature-fixups.c dest = (void *)start + *start; start 280 arch/powerpc/lib/feature-fixups.c long *start, *end; start 283 arch/powerpc/lib/feature-fixups.c start = fixup_start; start 293 arch/powerpc/lib/feature-fixups.c for (i = 0; start < end; start++, i++) { start 294 arch/powerpc/lib/feature-fixups.c dest = (void *)start + *start; start 308 arch/powerpc/lib/feature-fixups.c void *start, *end; start 310 arch/powerpc/lib/feature-fixups.c start = PTRRELOC(&__start___barrier_nospec_fixup), start 313 arch/powerpc/lib/feature-fixups.c do_barrier_nospec_fixups_range(enable, start, end); start 321 arch/powerpc/lib/feature-fixups.c long *start, *end; start 324 arch/powerpc/lib/feature-fixups.c start = fixup_start; start 336 arch/powerpc/lib/feature-fixups.c for (i = 0; start < end; start++, i++) { start 337 arch/powerpc/lib/feature-fixups.c dest = (void *)start + *start; start 349 arch/powerpc/lib/feature-fixups.c unsigned int *start, *end; start 351 arch/powerpc/lib/feature-fixups.c start = (void *)curr + *curr; start 353 arch/powerpc/lib/feature-fixups.c for (; start < end; start++) { start 354 arch/powerpc/lib/feature-fixups.c pr_devel("patching dest %lx\n", (unsigned long)start); start 355 arch/powerpc/lib/feature-fixups.c patch_instruction(start, PPC_INST_NOP); start 361 arch/powerpc/lib/feature-fixups.c long *start, *end; start 363 arch/powerpc/lib/feature-fixups.c start = PTRRELOC(&__start__btb_flush_fixup); start 366 arch/powerpc/lib/feature-fixups.c for (; start < end; start += 2) start 367 arch/powerpc/lib/feature-fixups.c patch_btb_flush_section(start); start 373 arch/powerpc/lib/feature-fixups.c long *start, *end; start 379 arch/powerpc/lib/feature-fixups.c start = fixup_start; start 382 arch/powerpc/lib/feature-fixups.c for (; start < end; start++) { start 383 arch/powerpc/lib/feature-fixups.c dest = 
(void *)start + *start; start 17 arch/powerpc/lib/pmem.c unsigned long start = (unsigned long) addr; start 18 arch/powerpc/lib/pmem.c flush_dcache_range(start, start + size); start 24 arch/powerpc/lib/pmem.c unsigned long start = (unsigned long) addr; start 25 arch/powerpc/lib/pmem.c flush_dcache_range(start, start + size); start 35 arch/powerpc/lib/pmem.c unsigned long copied, start = (unsigned long) dest; start 38 arch/powerpc/lib/pmem.c flush_dcache_range(start, start + size); start 45 arch/powerpc/lib/pmem.c unsigned long start = (unsigned long) dest; start 48 arch/powerpc/lib/pmem.c flush_dcache_range(start, start + size); start 138 arch/powerpc/lib/rheap.c blk->start = 0; start 163 arch/powerpc/lib/rheap.c s = blkn->start; start 175 arch/powerpc/lib/rheap.c bs = blk->start; start 193 arch/powerpc/lib/rheap.c if (before && s != (before->start + before->size)) start 196 arch/powerpc/lib/rheap.c if (after && e != after->start) start 221 arch/powerpc/lib/rheap.c after->start -= size; start 240 arch/powerpc/lib/rheap.c if (blk->start > blkn->start) { start 329 arch/powerpc/lib/rheap.c int rh_attach_region(rh_info_t * info, unsigned long start, int size) start 336 arch/powerpc/lib/rheap.c s = start; start 350 arch/powerpc/lib/rheap.c start = s; start 359 arch/powerpc/lib/rheap.c blk->start = start; start 370 arch/powerpc/lib/rheap.c unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) start 381 arch/powerpc/lib/rheap.c s = start; start 398 arch/powerpc/lib/rheap.c bs = blk->start; start 399 arch/powerpc/lib/rheap.c be = blk->start + blk->size; start 419 arch/powerpc/lib/rheap.c blk->start += size; start 428 arch/powerpc/lib/rheap.c newblk->start = e; start 447 arch/powerpc/lib/rheap.c unsigned long start, sp_size; start 463 arch/powerpc/lib/rheap.c start = (blk->start + alignment - 1) & ~(alignment - 1); start 464 arch/powerpc/lib/rheap.c if (start + size <= blk->start + blk->size) start 481 arch/powerpc/lib/rheap.c sp_size = start - blk->start; start 486 arch/powerpc/lib/rheap.c spblk->start = blk->start; start 492 arch/powerpc/lib/rheap.c newblk->start = start; start 497 arch/powerpc/lib/rheap.c blk->start = start + size; start 509 arch/powerpc/lib/rheap.c return start; start 527 arch/powerpc/lib/rheap.c unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) start 538 arch/powerpc/lib/rheap.c s = start; start 555 arch/powerpc/lib/rheap.c bs = blk->start; start 556 arch/powerpc/lib/rheap.c be = blk->start + blk->size; start 571 arch/powerpc/lib/rheap.c start = blk->start; start 574 arch/powerpc/lib/rheap.c return start; start 581 arch/powerpc/lib/rheap.c blk->start += size; start 590 arch/powerpc/lib/rheap.c newblk2->start = e; start 597 arch/powerpc/lib/rheap.c newblk1->start = s; start 601 arch/powerpc/lib/rheap.c start = newblk1->start; start 604 arch/powerpc/lib/rheap.c return start; start 612 arch/powerpc/lib/rheap.c int rh_free(rh_info_t * info, unsigned long start) start 622 arch/powerpc/lib/rheap.c if (start < blk2->start) start 627 arch/powerpc/lib/rheap.c if (blk == NULL || start > (blk->start + blk->size)) start 667 arch/powerpc/lib/rheap.c stats->start = blk->start; start 679 arch/powerpc/lib/rheap.c int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) start 689 arch/powerpc/lib/rheap.c if (start < blk2->start) start 694 arch/powerpc/lib/rheap.c if (blk == NULL || start > (blk->start + blk->size)) start 723 arch/powerpc/lib/rheap.c st[i].start, st[i].start + st[i].size, start 734 
arch/powerpc/lib/rheap.c st[i].start, st[i].start + st[i].size, start 744 arch/powerpc/lib/rheap.c blk, blk->start, blk->start + blk->size, blk->size); start 42 arch/powerpc/mm/book3s32/mmu.c unsigned long start; start 54 arch/powerpc/mm/book3s32/mmu.c if (va >= bat_addrs[b].start && va < bat_addrs[b].limit) start 55 arch/powerpc/mm/book3s32/mmu.c return bat_addrs[b].phys + (va - bat_addrs[b].start); start 67 arch/powerpc/mm/book3s32/mmu.c && pa < (bat_addrs[b].limit-bat_addrs[b].start) start 69 arch/powerpc/mm/book3s32/mmu.c return bat_addrs[b].start+(pa-bat_addrs[b].phys); start 236 arch/powerpc/mm/book3s32/mmu.c if (bat_addrs[i].start < (unsigned long)__init_begin) start 292 arch/powerpc/mm/book3s32/mmu.c bat_addrs[index].start = virt; start 74 arch/powerpc/mm/book3s32/tlb.c static void flush_range(struct mm_struct *mm, unsigned long start, start 86 arch/powerpc/mm/book3s32/tlb.c start &= PAGE_MASK; start 87 arch/powerpc/mm/book3s32/tlb.c if (start >= end) start 90 arch/powerpc/mm/book3s32/tlb.c pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); start 92 arch/powerpc/mm/book3s32/tlb.c pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; start 96 arch/powerpc/mm/book3s32/tlb.c count = ((pmd_end - start) >> PAGE_SHIFT) + 1; start 97 arch/powerpc/mm/book3s32/tlb.c flush_hash_pages(ctx, start, pmd_val(*pmd), count); start 101 arch/powerpc/mm/book3s32/tlb.c start = pmd_end + 1; start 109 arch/powerpc/mm/book3s32/tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 111 arch/powerpc/mm/book3s32/tlb.c flush_range(&init_mm, start, end); start 159 arch/powerpc/mm/book3s32/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 162 arch/powerpc/mm/book3s32/tlb.c flush_range(vma->vm_mm, start, end); start 107 arch/powerpc/mm/book3s64/hash_pgtable.c int __meminit hash__vmemmap_create_mapping(unsigned long start, start 113 arch/powerpc/mm/book3s64/hash_pgtable.c if ((start + page_size) >= H_VMEMMAP_END) { start 118 arch/powerpc/mm/book3s64/hash_pgtable.c rc = htab_bolt_mapping(start, start + page_size, phys, start 122 arch/powerpc/mm/book3s64/hash_pgtable.c int rc2 = htab_remove_mapping(start, start + page_size, start 131 arch/powerpc/mm/book3s64/hash_pgtable.c void hash__vmemmap_remove_mapping(unsigned long start, start 134 arch/powerpc/mm/book3s64/hash_pgtable.c int rc = htab_remove_mapping(start, start + page_size, start 414 arch/powerpc/mm/book3s64/hash_pgtable.c static bool hash__change_memory_range(unsigned long start, unsigned long end, start 423 arch/powerpc/mm/book3s64/hash_pgtable.c start = ALIGN_DOWN(start, step); start 426 arch/powerpc/mm/book3s64/hash_pgtable.c if (start >= end) start 430 arch/powerpc/mm/book3s64/hash_pgtable.c start, end, newpp, step); start 432 arch/powerpc/mm/book3s64/hash_pgtable.c for (idx = start; idx < end; idx += step) start 442 arch/powerpc/mm/book3s64/hash_pgtable.c unsigned long start, end; start 444 arch/powerpc/mm/book3s64/hash_pgtable.c start = (unsigned long)_stext; start 447 arch/powerpc/mm/book3s64/hash_pgtable.c WARN_ON(!hash__change_memory_range(start, end, PP_RXXX)); start 452 arch/powerpc/mm/book3s64/hash_pgtable.c unsigned long start, end, pp; start 454 arch/powerpc/mm/book3s64/hash_pgtable.c start = (unsigned long)__init_begin; start 459 arch/powerpc/mm/book3s64/hash_pgtable.c WARN_ON(!hash__change_memory_range(start, end, pp)); start 192 arch/powerpc/mm/book3s64/hash_tlb.c void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, start 199 
arch/powerpc/mm/book3s64/hash_tlb.c start = _ALIGN_DOWN(start, PAGE_SIZE); start 214 arch/powerpc/mm/book3s64/hash_tlb.c for (; start < end; start += PAGE_SIZE) { start 215 arch/powerpc/mm/book3s64/hash_tlb.c pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp, start 223 arch/powerpc/mm/book3s64/hash_tlb.c trace_hugepage_invalidate(start, pte); start 227 arch/powerpc/mm/book3s64/hash_tlb.c hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); start 229 arch/powerpc/mm/book3s64/hash_tlb.c hpte_need_flush(mm, start, ptep, pte, hugepage_shift); start 792 arch/powerpc/mm/book3s64/hash_utils.c int hash__create_section_mapping(unsigned long start, unsigned long end, int nid) start 801 arch/powerpc/mm/book3s64/hash_utils.c rc = htab_bolt_mapping(start, end, __pa(start), start 806 arch/powerpc/mm/book3s64/hash_utils.c int rc2 = htab_remove_mapping(start, end, mmu_linear_psize, start 813 arch/powerpc/mm/book3s64/hash_utils.c int hash__remove_section_mapping(unsigned long start, unsigned long end) start 815 arch/powerpc/mm/book3s64/hash_utils.c int rc = htab_remove_mapping(start, end, mmu_linear_psize, start 174 arch/powerpc/mm/book3s64/pgtable.c int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid) start 177 arch/powerpc/mm/book3s64/pgtable.c return radix__create_section_mapping(start, end, nid); start 179 arch/powerpc/mm/book3s64/pgtable.c return hash__create_section_mapping(start, end, nid); start 182 arch/powerpc/mm/book3s64/pgtable.c int __meminit remove_section_mapping(unsigned long start, unsigned long end) start 185 arch/powerpc/mm/book3s64/pgtable.c return radix__remove_section_mapping(start, end); start 187 arch/powerpc/mm/book3s64/pgtable.c return hash__remove_section_mapping(start, end); start 30 arch/powerpc/mm/book3s64/radix_hugetlbpage.c void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start, start 37 arch/powerpc/mm/book3s64/radix_hugetlbpage.c radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); start 170 arch/powerpc/mm/book3s64/radix_pgtable.c void radix__change_memory_range(unsigned long start, unsigned long end, start 179 arch/powerpc/mm/book3s64/radix_pgtable.c start = ALIGN_DOWN(start, PAGE_SIZE); start 183 arch/powerpc/mm/book3s64/radix_pgtable.c start, end, clear); start 185 arch/powerpc/mm/book3s64/radix_pgtable.c for (idx = start; idx < end; idx += PAGE_SIZE) { start 208 arch/powerpc/mm/book3s64/radix_pgtable.c radix__flush_tlb_kernel_range(start, end); start 213 arch/powerpc/mm/book3s64/radix_pgtable.c unsigned long start, end; start 215 arch/powerpc/mm/book3s64/radix_pgtable.c start = (unsigned long)_stext; start 218 arch/powerpc/mm/book3s64/radix_pgtable.c radix__change_memory_range(start, end, _PAGE_WRITE); start 223 arch/powerpc/mm/book3s64/radix_pgtable.c unsigned long start = (unsigned long)__init_begin; start 226 arch/powerpc/mm/book3s64/radix_pgtable.c radix__change_memory_range(start, end, _PAGE_EXEC); start 231 arch/powerpc/mm/book3s64/radix_pgtable.c print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec) start 235 arch/powerpc/mm/book3s64/radix_pgtable.c if (end <= start) start 240 arch/powerpc/mm/book3s64/radix_pgtable.c pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf, start 253 arch/powerpc/mm/book3s64/radix_pgtable.c static int __meminit create_physical_mapping(unsigned long start, start 262 arch/powerpc/mm/book3s64/radix_pgtable.c start = _ALIGN_UP(start, PAGE_SIZE); start 263 arch/powerpc/mm/book3s64/radix_pgtable.c for (addr = 
start; addr < end; addr += mapping_size) { start 296 arch/powerpc/mm/book3s64/radix_pgtable.c print_mapping(start, addr, previous_size, prev_exec); start 297 arch/powerpc/mm/book3s64/radix_pgtable.c start = addr; start 300 arch/powerpc/mm/book3s64/radix_pgtable.c rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end); start 307 arch/powerpc/mm/book3s64/radix_pgtable.c print_mapping(start, addr, mapping_size, exec); start 694 arch/powerpc/mm/book3s64/radix_pgtable.c unsigned long start; start 710 arch/powerpc/mm/book3s64/radix_pgtable.c create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1); start 778 arch/powerpc/mm/book3s64/radix_pgtable.c params.start = addr; start 840 arch/powerpc/mm/book3s64/radix_pgtable.c static void __meminit remove_pagetable(unsigned long start, unsigned long end) start 848 arch/powerpc/mm/book3s64/radix_pgtable.c for (addr = start; addr < end; addr = next) { start 865 arch/powerpc/mm/book3s64/radix_pgtable.c radix__flush_tlb_kernel_range(start, end); start 868 arch/powerpc/mm/book3s64/radix_pgtable.c int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid) start 875 arch/powerpc/mm/book3s64/radix_pgtable.c return create_physical_mapping(__pa(start), __pa(end), nid); start 878 arch/powerpc/mm/book3s64/radix_pgtable.c int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) start 880 arch/powerpc/mm/book3s64/radix_pgtable.c remove_pagetable(start, end); start 893 arch/powerpc/mm/book3s64/radix_pgtable.c int __meminit radix__vmemmap_create_mapping(unsigned long start, start 902 arch/powerpc/mm/book3s64/radix_pgtable.c if ((start + page_size) >= RADIX_VMEMMAP_END) { start 907 arch/powerpc/mm/book3s64/radix_pgtable.c ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid); start 914 arch/powerpc/mm/book3s64/radix_pgtable.c void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size) start 916 arch/powerpc/mm/book3s64/radix_pgtable.c remove_pagetable(start, start + page_size); start 415 arch/powerpc/mm/book3s64/radix_tlb.c static inline void __tlbiel_va_range(unsigned long start, unsigned long end, start 422 arch/powerpc/mm/book3s64/radix_tlb.c for (addr = start; addr < end; addr += page_size) start 436 arch/powerpc/mm/book3s64/radix_tlb.c static inline void _tlbiel_va_range(unsigned long start, unsigned long end, start 443 arch/powerpc/mm/book3s64/radix_tlb.c __tlbiel_va_range(start, end, pid, page_size, psize); start 447 arch/powerpc/mm/book3s64/radix_tlb.c static inline void __tlbie_va_range(unsigned long start, unsigned long end, start 454 arch/powerpc/mm/book3s64/radix_tlb.c for (addr = start; addr < end; addr += page_size) start 503 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long start; start 514 arch/powerpc/mm/book3s64/radix_tlb.c _tlbiel_va_range(t->start, t->end, t->pid, t->page_size, start 529 arch/powerpc/mm/book3s64/radix_tlb.c static inline void _tlbie_va_range(unsigned long start, unsigned long end, start 536 arch/powerpc/mm/book3s64/radix_tlb.c __tlbie_va_range(start, end, pid, page_size, psize); start 541 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long start, unsigned long end, start 546 arch/powerpc/mm/book3s64/radix_tlb.c struct tlbiel_va_range t = { .start = start, .end = end, start 552 arch/powerpc/mm/book3s64/radix_tlb.c _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); start 811 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_kernel_range(unsigned long start, unsigned 
long end) start 835 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long start, unsigned long end, start 842 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long nr_pages = (end - start) >> page_shift; start 891 arch/powerpc/mm/book3s64/radix_tlb.c hstart = (start + PMD_SIZE - 1) & PMD_MASK; start 898 arch/powerpc/mm/book3s64/radix_tlb.c gstart = (start + PUD_SIZE - 1) & PUD_MASK; start 906 arch/powerpc/mm/book3s64/radix_tlb.c __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); start 916 arch/powerpc/mm/book3s64/radix_tlb.c __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); start 927 arch/powerpc/mm/book3s64/radix_tlb.c start, end, pid, page_size, mmu_virtual_psize, false); start 939 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 945 arch/powerpc/mm/book3s64/radix_tlb.c return radix__flush_hugetlb_tlb_range(vma, start, end); start 948 arch/powerpc/mm/book3s64/radix_tlb.c __radix__flush_tlb_range(vma->vm_mm, start, end, false); start 1006 arch/powerpc/mm/book3s64/radix_tlb.c static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, start 1014 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long start = tlb->start; start 1055 arch/powerpc/mm/book3s64/radix_tlb.c __radix__flush_tlb_range(mm, start, end, true); start 1066 arch/powerpc/mm/book3s64/radix_tlb.c radix__flush_tlb_range_psize(mm, start, end, psize); start 1068 arch/powerpc/mm/book3s64/radix_tlb.c radix__flush_tlb_pwc_range_psize(mm, start, end, psize); start 1074 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long start, unsigned long end, start 1080 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long nr_pages = (end - start) >> page_shift; start 1124 arch/powerpc/mm/book3s64/radix_tlb.c _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); start 1126 arch/powerpc/mm/book3s64/radix_tlb.c _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); start 1129 arch/powerpc/mm/book3s64/radix_tlb.c start, end, pid, page_size, psize, also_pwc); start 1134 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, start 1137 arch/powerpc/mm/book3s64/radix_tlb.c return __radix__flush_tlb_range_psize(mm, start, end, psize, false); start 1140 arch/powerpc/mm/book3s64/radix_tlb.c static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, start 1143 arch/powerpc/mm/book3s64/radix_tlb.c __radix__flush_tlb_range_psize(mm, start, end, psize, true); start 1186 arch/powerpc/mm/book3s64/radix_tlb.c unsigned long start, unsigned long end) start 1188 arch/powerpc/mm/book3s64/radix_tlb.c radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M); start 365 arch/powerpc/mm/book3s64/slb.c void preload_new_slb_context(unsigned long start, unsigned long sp) start 380 arch/powerpc/mm/book3s64/slb.c if (!is_kernel_addr(start)) { start 381 arch/powerpc/mm/book3s64/slb.c if (preload_add(ti, start)) start 382 arch/powerpc/mm/book3s64/slb.c slb_allocate_user(mm, start); start 24 arch/powerpc/mm/dma-noncoherent.c unsigned long start = (unsigned long)vaddr; start 25 arch/powerpc/mm/dma-noncoherent.c unsigned long end = start + size; start 35 arch/powerpc/mm/dma-noncoherent.c if ((start | end) & (L1_CACHE_BYTES - 1)) start 36 arch/powerpc/mm/dma-noncoherent.c flush_dcache_range(start, end); start 38 arch/powerpc/mm/dma-noncoherent.c invalidate_dcache_range(start, end); start 41 arch/powerpc/mm/dma-noncoherent.c clean_dcache_range(start, end); 
start 44 arch/powerpc/mm/dma-noncoherent.c flush_dcache_range(start, end); start 64 arch/powerpc/mm/dma-noncoherent.c unsigned long flags, start, seg_offset = offset; start 71 arch/powerpc/mm/dma-noncoherent.c start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset; start 74 arch/powerpc/mm/dma-noncoherent.c __dma_sync((void *)start, seg_size, direction); start 75 arch/powerpc/mm/dma-noncoherent.c kunmap_atomic((void *)start); start 102 arch/powerpc/mm/dma-noncoherent.c unsigned long start = (unsigned long)page_address(page) + offset; start 103 arch/powerpc/mm/dma-noncoherent.c __dma_sync((void *)start, size, dir); start 303 arch/powerpc/mm/hugetlbpage.c unsigned long start, unsigned long end, start 317 arch/powerpc/mm/hugetlbpage.c start &= pdmask; start 318 arch/powerpc/mm/hugetlbpage.c if (start < floor) start 346 arch/powerpc/mm/hugetlbpage.c unsigned long start; start 348 arch/powerpc/mm/hugetlbpage.c start = addr; start 376 arch/powerpc/mm/hugetlbpage.c start &= PUD_MASK; start 377 arch/powerpc/mm/hugetlbpage.c if (start < floor) start 387 arch/powerpc/mm/hugetlbpage.c pmd = pmd_offset(pud, start); start 389 arch/powerpc/mm/hugetlbpage.c pmd_free_tlb(tlb, pmd, start); start 399 arch/powerpc/mm/hugetlbpage.c unsigned long start; start 401 arch/powerpc/mm/hugetlbpage.c start = addr; start 427 arch/powerpc/mm/hugetlbpage.c start &= PGDIR_MASK; start 428 arch/powerpc/mm/hugetlbpage.c if (start < floor) start 438 arch/powerpc/mm/hugetlbpage.c pud = pud_offset(pgd, start); start 440 arch/powerpc/mm/hugetlbpage.c pud_free_tlb(tlb, pud, start); start 673 arch/powerpc/mm/hugetlbpage.c void *start; start 681 arch/powerpc/mm/hugetlbpage.c start = kmap_atomic(page+i); start 682 arch/powerpc/mm/hugetlbpage.c __flush_dcache_icache(start); start 683 arch/powerpc/mm/hugetlbpage.c kunmap_atomic(start); start 91 arch/powerpc/mm/init_64.c static int __meminit vmemmap_populated(unsigned long start, int page_size) start 93 arch/powerpc/mm/init_64.c unsigned long end = start + page_size; start 94 arch/powerpc/mm/init_64.c start = (unsigned long)(pfn_to_page(vmemmap_section_start(start))); start 96 arch/powerpc/mm/init_64.c for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page))) start 97 arch/powerpc/mm/init_64.c if (pfn_valid(page_to_pfn((struct page *)start))) start 157 arch/powerpc/mm/init_64.c unsigned long start, start 169 arch/powerpc/mm/init_64.c vmem_back->virt_addr = start; start 175 arch/powerpc/mm/init_64.c static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start, start 179 arch/powerpc/mm/init_64.c unsigned long start_pfn = page_to_pfn((struct page *)start); start 190 arch/powerpc/mm/init_64.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 196 arch/powerpc/mm/init_64.c start = _ALIGN_DOWN(start, page_size); start 198 arch/powerpc/mm/init_64.c pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); start 200 arch/powerpc/mm/init_64.c for (; start < end; start += page_size) { start 204 arch/powerpc/mm/init_64.c if (vmemmap_populated(start, page_size)) start 212 arch/powerpc/mm/init_64.c if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { start 222 arch/powerpc/mm/init_64.c vmemmap_list_populate(__pa(p), start, node); start 225 arch/powerpc/mm/init_64.c start, start + page_size, p); start 227 arch/powerpc/mm/init_64.c rc = vmemmap_create_mapping(start, page_size, __pa(p)); start 239 arch/powerpc/mm/init_64.c static unsigned long vmemmap_list_free(unsigned long start) start 247 
arch/powerpc/mm/init_64.c if (vmem_back->virt_addr == start) start 271 arch/powerpc/mm/init_64.c void __ref vmemmap_free(unsigned long start, unsigned long end, start 279 arch/powerpc/mm/init_64.c start = _ALIGN_DOWN(start, page_size); start 286 arch/powerpc/mm/init_64.c pr_debug("vmemmap_free %lx...%lx\n", start, end); start 288 arch/powerpc/mm/init_64.c for (; start < end; start += page_size) { start 297 arch/powerpc/mm/init_64.c if (vmemmap_populated(start, page_size)) start 300 arch/powerpc/mm/init_64.c addr = vmemmap_list_free(start); start 326 arch/powerpc/mm/init_64.c vmemmap_remove_mapping(start, page_size); start 81 arch/powerpc/mm/kasan/kasan_init_32.c static int __ref kasan_init_region(void *start, size_t size) start 83 arch/powerpc/mm/kasan/kasan_init_32.c unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); start 84 arch/powerpc/mm/kasan/kasan_init_32.c unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); start 91 arch/powerpc/mm/mem.c int memory_add_physaddr_to_nid(u64 start) start 93 arch/powerpc/mm/mem.c return hot_add_scn_to_nid(start); start 97 arch/powerpc/mm/mem.c int __weak create_section_mapping(unsigned long start, unsigned long end, int nid) start 102 arch/powerpc/mm/mem.c int __weak remove_section_mapping(unsigned long start, unsigned long end) start 117 arch/powerpc/mm/mem.c static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, start 122 arch/powerpc/mm/mem.c for (i = start; i < stop; i += chunk) { start 128 arch/powerpc/mm/mem.c int __ref arch_add_memory(int nid, u64 start, u64 size, start 131 arch/powerpc/mm/mem.c unsigned long start_pfn = start >> PAGE_SHIFT; start 137 arch/powerpc/mm/mem.c start = (unsigned long)__va(start); start 138 arch/powerpc/mm/mem.c rc = create_section_mapping(start, start + size, nid); start 141 arch/powerpc/mm/mem.c start, start + size, rc); start 145 arch/powerpc/mm/mem.c flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); start 150 arch/powerpc/mm/mem.c void __ref arch_remove_memory(int nid, u64 start, u64 size, start 153 arch/powerpc/mm/mem.c unsigned long start_pfn = start >> PAGE_SHIFT; start 160 arch/powerpc/mm/mem.c start = (unsigned long)__va(start); start 161 arch/powerpc/mm/mem.c flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); start 163 arch/powerpc/mm/mem.c ret = remove_section_mapping(start, start + size); start 382 arch/powerpc/mm/mem.c static void invalidate_icache_range(unsigned long start, unsigned long stop) start 386 arch/powerpc/mm/mem.c char *addr = (char *)(start & ~(bytes - 1)); start 406 arch/powerpc/mm/mem.c void flush_icache_range(unsigned long start, unsigned long stop) start 408 arch/powerpc/mm/mem.c if (flush_coherent_icache(start)) start 411 arch/powerpc/mm/mem.c clean_dcache_range(start, stop); start 419 arch/powerpc/mm/mem.c iccci((void *)start); start 423 arch/powerpc/mm/mem.c invalidate_icache_range(start, stop); start 495 arch/powerpc/mm/mem.c void *start = kmap_atomic(page); start 496 arch/powerpc/mm/mem.c __flush_dcache_icache(start); start 497 arch/powerpc/mm/mem.c kunmap_atomic(start); start 604 arch/powerpc/mm/mem.c res->start = base; start 22 arch/powerpc/mm/nohash/book3e_pgtable.c int __meminit vmemmap_create_mapping(unsigned long start, start 41 arch/powerpc/mm/nohash/book3e_pgtable.c BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags))); start 47 arch/powerpc/mm/nohash/book3e_pgtable.c void vmemmap_remove_mapping(unsigned long start, start 60 arch/powerpc/mm/nohash/fsl_booke.c unsigned long start; 
start 67 arch/powerpc/mm/nohash/fsl_booke.c return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; start 78 arch/powerpc/mm/nohash/fsl_booke.c if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) start 79 arch/powerpc/mm/nohash/fsl_booke.c return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); start 91 arch/powerpc/mm/nohash/fsl_booke.c && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) start 93 arch/powerpc/mm/nohash/fsl_booke.c return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); start 138 arch/powerpc/mm/nohash/fsl_booke.c tlbcam_addrs[index].start = virt; start 264 arch/powerpc/mm/nohash/fsl_booke.c notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) start 268 arch/powerpc/mm/nohash/fsl_booke.c kernstart_addr = start; start 291 arch/powerpc/mm/nohash/fsl_booke.c start &= ~0x3ffffff; start 293 arch/powerpc/mm/nohash/fsl_booke.c virt_phys_offset = base - start; start 301 arch/powerpc/mm/nohash/fsl_booke.c if (start != memstart_addr) { start 303 arch/powerpc/mm/nohash/fsl_booke.c long offset = start - memstart_addr; start 308 arch/powerpc/mm/nohash/fsl_booke.c if (memstart_addr > start) start 312 arch/powerpc/mm/nohash/fsl_booke.c map_mem_in_cams_addr(start, PAGE_OFFSET + offset, start 363 arch/powerpc/mm/nohash/tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 382 arch/powerpc/mm/nohash/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 386 arch/powerpc/mm/nohash/tlb.c if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK)) start 387 arch/powerpc/mm/nohash/tlb.c flush_tlb_page(vma, start); start 414 arch/powerpc/mm/nohash/tlb.c unsigned long start = address & PMD_MASK; start 422 arch/powerpc/mm/nohash/tlb.c while (start < end) { start 423 arch/powerpc/mm/nohash/tlb.c __flush_tlb_page(tlb->mm, start, tsize, 1); start 424 arch/powerpc/mm/nohash/tlb.c start += size; start 552 arch/powerpc/mm/numa.c static unsigned long __init numa_enforce_memory_limit(unsigned long start, start 562 arch/powerpc/mm/numa.c if (start + size <= memblock_end_of_DRAM()) start 565 arch/powerpc/mm/numa.c if (start >= memblock_end_of_DRAM()) start 568 arch/powerpc/mm/numa.c return memblock_end_of_DRAM() - start; start 685 arch/powerpc/mm/numa.c unsigned long start; start 703 arch/powerpc/mm/numa.c start = read_n_cells(n_mem_addr_cells, &memcell_buf); start 715 arch/powerpc/mm/numa.c fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid); start 718 arch/powerpc/mm/numa.c size = numa_enforce_memory_limit(start, size); start 720 arch/powerpc/mm/numa.c memblock_set_node(start, size, &memblock.memory, nid); start 995 arch/powerpc/mm/numa.c unsigned long start, size; start 1008 arch/powerpc/mm/numa.c start = read_n_cells(n_mem_addr_cells, &memcell_buf); start 1011 arch/powerpc/mm/numa.c if ((scn_addr < start) || (scn_addr >= (start + size))) start 189 arch/powerpc/mm/pgtable_32.c struct page *start = page; start 199 arch/powerpc/mm/pgtable_32.c flush_tlb_kernel_range((unsigned long)page_address(start), start 372 arch/powerpc/mm/ptdump/hashpagetable.c static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) start 379 arch/powerpc/mm/ptdump/hashpagetable.c addr = start + i * PAGE_SIZE; start 406 arch/powerpc/mm/ptdump/hashpagetable.c static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) start 413 arch/powerpc/mm/ptdump/hashpagetable.c addr = start + i * PMD_SIZE; start 420 arch/powerpc/mm/ptdump/hashpagetable.c static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long 
start) start 427 arch/powerpc/mm/ptdump/hashpagetable.c addr = start + i * PUD_SIZE; start 252 arch/powerpc/mm/ptdump/ptdump.c static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) start 259 arch/powerpc/mm/ptdump/ptdump.c addr = start + i * PAGE_SIZE; start 265 arch/powerpc/mm/ptdump/ptdump.c static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) start 272 arch/powerpc/mm/ptdump/ptdump.c addr = start + i * PMD_SIZE; start 281 arch/powerpc/mm/ptdump/ptdump.c static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) start 288 arch/powerpc/mm/ptdump/ptdump.c addr = start + i * PUD_SIZE; start 60 arch/powerpc/mm/slice.c static void slice_range_to_mask(unsigned long start, unsigned long len, start 63 arch/powerpc/mm/slice.c unsigned long end = start + len - 1; start 69 arch/powerpc/mm/slice.c if (slice_addr_is_low(start)) { start 74 arch/powerpc/mm/slice.c - (1u << GET_LOW_SLICE_INDEX(start)); start 78 arch/powerpc/mm/slice.c unsigned long start_index = GET_HIGH_SLICE_INDEX(start); start 105 arch/powerpc/mm/slice.c unsigned long start = slice << SLICE_HIGH_SHIFT; start 106 arch/powerpc/mm/slice.c unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); start 111 arch/powerpc/mm/slice.c if (start == 0) start 112 arch/powerpc/mm/slice.c start = (unsigned long)SLICE_LOW_TOP; start 114 arch/powerpc/mm/slice.c return !slice_area_is_free(mm, start, end - start); start 140 arch/powerpc/mm/slice.c unsigned long start, unsigned long len) start 142 arch/powerpc/mm/slice.c unsigned long end = start + len - 1; start 145 arch/powerpc/mm/slice.c if (slice_addr_is_low(start)) { start 150 arch/powerpc/mm/slice.c - (1u << GET_LOW_SLICE_INDEX(start)); start 156 arch/powerpc/mm/slice.c unsigned long start_index = GET_HIGH_SLICE_INDEX(start); start 729 arch/powerpc/mm/slice.c void slice_set_range_psize(struct mm_struct *mm, unsigned long start, start 736 arch/powerpc/mm/slice.c slice_range_to_mask(start, len, &mask); start 18 arch/powerpc/net/bpf_jit_comp.c static inline void bpf_flush_icache(void *start, void *end) start 21 arch/powerpc/net/bpf_jit_comp.c flush_icache_range((unsigned long)start, (unsigned long)end); start 26 arch/powerpc/net/bpf_jit_comp64.c static inline void bpf_flush_icache(void *start, void *end) start 29 arch/powerpc/net/bpf_jit_comp64.c flush_icache_range((unsigned long)start, (unsigned long)end); start 86 arch/powerpc/oprofile/common.c ret = model->start(ctr); start 97 arch/powerpc/oprofile/common.c if (model->start) { start 231 arch/powerpc/oprofile/common.c ops->start = op_powerpc_start; start 204 arch/powerpc/oprofile/op_model_7450.c .start = fsl7450_start, start 377 arch/powerpc/oprofile/op_model_fsl_emb.c .start = fsl_emb_start, start 224 arch/powerpc/oprofile/op_model_pa6t.c .start = pa6t_start, start 435 arch/powerpc/oprofile/op_model_power4.c .start = power4_start, start 2023 arch/powerpc/perf/core-book3s.c .start = power_pmu_start, start 592 arch/powerpc/perf/core-fsl-emb.c .start = fsl_emb_pmu_start, start 220 arch/powerpc/perf/hv-24x7.c void *start = ev; start 222 arch/powerpc/perf/hv-24x7.c return (start + offsetof(struct hv_24x7_event_data, remainder)) < end; start 235 arch/powerpc/perf/hv-24x7.c void *start = ev; start 245 arch/powerpc/perf/hv-24x7.c if (start + nl > end) { start 247 arch/powerpc/perf/hv-24x7.c __func__, start, nl, end); start 260 arch/powerpc/perf/hv-24x7.c if (start + nl + dl > end) { start 262 arch/powerpc/perf/hv-24x7.c __func__, start, nl, dl, start + nl + dl, end); start 276 
arch/powerpc/perf/hv-24x7.c if (start + nl + dl + ldl > end) { start 278 arch/powerpc/perf/hv-24x7.c __func__, start, nl, dl, ldl, end); start 282 arch/powerpc/perf/hv-24x7.c return start + nl + dl + ldl; start 1561 arch/powerpc/perf/hv-24x7.c .start = h_24x7_event_start, start 272 arch/powerpc/perf/hv-gpci.c .start = h_gpci_event_start, start 1330 arch/powerpc/perf/imc-pmu.c pmu->pmu.start = imc_event_start; start 1358 arch/powerpc/perf/imc-pmu.c pmu->pmu.start = trace_imc_event_start; start 143 arch/powerpc/platforms/4xx/hsta_msi.c ppc4xx_hsta_msi.address = mem->start; start 144 arch/powerpc/platforms/4xx/hsta_msi.c ppc4xx_hsta_msi.data = ioremap(mem->start, resource_size(mem)); start 172 arch/powerpc/platforms/4xx/msi.c mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */ start 173 arch/powerpc/platforms/4xx/msi.c mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */ start 82 arch/powerpc/platforms/4xx/pci.c dev->resource[i].start = dev->resource[i].end = 0; start 102 arch/powerpc/platforms/4xx/pci.c res->start = 0; start 143 arch/powerpc/platforms/4xx/pci.c res->start = pci_addr; start 149 arch/powerpc/platforms/4xx/pci.c res->end = res->start + size - 1; start 154 arch/powerpc/platforms/4xx/pci.c if (dma_offset_set && pci_dram_offset != res->start) { start 171 arch/powerpc/platforms/4xx/pci.c (res->start & (size - 1)) != 0) { start 188 arch/powerpc/platforms/4xx/pci.c pci_dram_offset = res->start; start 189 arch/powerpc/platforms/4xx/pci.c hose->dma_window_base_cur = res->start; start 272 arch/powerpc/platforms/4xx/pci.c res->start, start 273 arch/powerpc/platforms/4xx/pci.c res->start - offset, start 282 arch/powerpc/platforms/4xx/pci.c if (res->start == offset) start 312 arch/powerpc/platforms/4xx/pci.c PCI_BASE_ADDRESS_1, res->start); start 357 arch/powerpc/platforms/4xx/pci.c reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg)); start 372 arch/powerpc/platforms/4xx/pci.c setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0); start 471 arch/powerpc/platforms/4xx/pci.c res->start, start 472 arch/powerpc/platforms/4xx/pci.c res->start - offset, start 481 arch/powerpc/platforms/4xx/pci.c if (res->start == offset) start 520 arch/powerpc/platforms/4xx/pci.c writel(res->start, reg + PCIX0_BAR0L); start 563 arch/powerpc/platforms/4xx/pci.c reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg)); start 578 arch/powerpc/platforms/4xx/pci.c setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, start 1241 arch/powerpc/platforms/4xx/pci.c mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); start 1376 arch/powerpc/platforms/4xx/pci.c void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000, start 1467 arch/powerpc/platforms/4xx/pci.c RES_TO_U32_HIGH(port->cfg_space.start)); start 1469 arch/powerpc/platforms/4xx/pci.c RES_TO_U32_LOW(port->cfg_space.start)); start 1476 arch/powerpc/platforms/4xx/pci.c RES_TO_U32_HIGH(port->utl_regs.start)); start 1478 arch/powerpc/platforms/4xx/pci.c RES_TO_U32_LOW(port->utl_regs.start)); start 1512 arch/powerpc/platforms/4xx/pci.c port->utl_base = ioremap(port->utl_regs.start, 0x100); start 1808 arch/powerpc/platforms/4xx/pci.c res->start, start 1809 arch/powerpc/platforms/4xx/pci.c res->start - offset, start 1818 arch/powerpc/platforms/4xx/pci.c if (res->start == offset) start 1901 arch/powerpc/platforms/4xx/pci.c out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); start 1902 arch/powerpc/platforms/4xx/pci.c out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); 
start 1960 arch/powerpc/platforms/4xx/pci.c cfg_data = ioremap(port->cfg_space.start + start 1974 arch/powerpc/platforms/4xx/pci.c mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); start 987 arch/powerpc/platforms/512x/clock-commonclk.c snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \ start 1037 arch/powerpc/platforms/512x/clock-commonclk.c idx = (res.start >> 8) & 0xf; start 1055 arch/powerpc/platforms/512x/clock-commonclk.c idx += (res.start & 0x2000) ? 2 : 0; start 1056 arch/powerpc/platforms/512x/clock-commonclk.c idx += (res.start & 0x0080) ? 1 : 0; start 1128 arch/powerpc/platforms/512x/clock-commonclk.c if (res.start & 0x4000) start 1137 arch/powerpc/platforms/512x/clock-commonclk.c idx = (res.start & 0x4000) ? 1 : 0; start 447 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c lpbfifo.regs_phys = r.start; start 195 arch/powerpc/platforms/512x/mpc512x_shared.c unsigned long start, end; start 198 arch/powerpc/platforms/512x/mpc512x_shared.c start = PFN_UP(addr); start 201 arch/powerpc/platforms/512x/mpc512x_shared.c for (; start < end; start++) start 202 arch/powerpc/platforms/512x/mpc512x_shared.c mpc512x_free_bootmem(pfn_to_page(start)); start 486 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c lpbfifo.regs_phys = res.start; start 504 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, start 520 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, start 262 arch/powerpc/platforms/52xx/mpc52xx_pci.c (unsigned long long)res->start, start 266 arch/powerpc/platforms/52xx/mpc52xx_pci.c MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start, start 278 arch/powerpc/platforms/52xx/mpc52xx_pci.c res->start, res->end, res->flags); start 280 arch/powerpc/platforms/52xx/mpc52xx_pci.c MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start, start 297 arch/powerpc/platforms/52xx/mpc52xx_pci.c (unsigned long long)res->start, start 302 arch/powerpc/platforms/52xx/mpc52xx_pci.c res->start, start 346 arch/powerpc/platforms/52xx/mpc52xx_pci.c if (res->end > res->start) { /* Only valid resources */ start 347 arch/powerpc/platforms/52xx/mpc52xx_pci.c res->end -= res->start; start 348 arch/powerpc/platforms/52xx/mpc52xx_pci.c res->start = 0; start 359 arch/powerpc/platforms/52xx/mpc52xx_pci.c res->start = res->end = res->flags = 0; start 405 arch/powerpc/platforms/52xx/mpc52xx_pci.c pci_regs = ioremap(rsrc.start, resource_size(&rsrc)); start 413 arch/powerpc/platforms/52xx/mpc52xx_pci.c mpc52xx_pci_setup(hose, pci_regs, rsrc.start); start 79 arch/powerpc/platforms/52xx/mpc52xx_pm.c mbar = ioremap(res.start, 0xc000); /* we should map whole region including SRAM */ start 132 arch/powerpc/platforms/82xx/ep8248e.c snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); start 48 arch/powerpc/platforms/82xx/pq2.c if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b) start 59 arch/powerpc/platforms/82xx/pq2.c setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0); start 67 arch/powerpc/platforms/83xx/km83xx.c base = ioremap(res.start, res.end - res.start + 1); start 65 arch/powerpc/platforms/83xx/mpc832x_mds.c bcsr_regs = ioremap(res.start, resource_size(&res)); start 51 arch/powerpc/platforms/83xx/mpc834x_mds.c bcsr_regs = ioremap(res.start, resource_size(&res)); start 73 arch/powerpc/platforms/83xx/mpc836x_mds.c bcsr_regs = ioremap(res.start, resource_size(&res)); start 361 arch/powerpc/platforms/83xx/suspend.c pmc_regs = ioremap(res.start, sizeof(*pmc_regs)); start 374 
arch/powerpc/platforms/83xx/suspend.c clock_regs = ioremap(res.start, sizeof(*clock_regs)); start 169 arch/powerpc/platforms/83xx/usb.c usb_regs = ioremap(res.start, resource_size(&res)); start 92 arch/powerpc/platforms/85xx/ge_imp3a.c if ((rsrc.start & 0xfffff) == 0x9000) start 193 arch/powerpc/platforms/85xx/mpc85xx_cds.c res->start = 0; start 331 arch/powerpc/platforms/85xx/mpc85xx_mds.c (unsigned long long)res.start, 1); start 338 arch/powerpc/platforms/85xx/mpc85xx_mds.c (unsigned long long)res.start, 7); start 74 arch/powerpc/platforms/85xx/sbc8548.c rev = ioremap(res.start,sizeof(unsigned int)); start 385 arch/powerpc/platforms/85xx/smp.c long start = mftb(); start 411 arch/powerpc/platforms/85xx/smp.c if (!notified && now - start > 1000000) { start 105 arch/powerpc/platforms/85xx/xes_mpc85xx.c l2_base = ioremap(r[0].start, resource_size(&r[0])); start 299 arch/powerpc/platforms/86xx/mpc8610_hpcd.c pixis = ioremap(r.start, 32); start 154 arch/powerpc/platforms/8xx/cpm1.c cpic_reg = ioremap(res.start, resource_size(&res)); start 146 arch/powerpc/platforms/8xx/pic.c siu_reg = ioremap(res.start, resource_size(&res)); start 125 arch/powerpc/platforms/cell/cpufreq_spudemand.c .start = spu_gov_start, start 322 arch/powerpc/platforms/cell/interrupt.c init_one_iic(np[0], r0.start, dn); start 323 arch/powerpc/platforms/cell/interrupt.c init_one_iic(np[1], r1.start, dn); start 272 arch/powerpc/platforms/cell/iommu.c *base = r.start; start 108 arch/powerpc/platforms/cell/setup.c dev->resource[i].start = dev->resource[i].end = 0; start 135 arch/powerpc/platforms/cell/spider-pci.c regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE); start 343 arch/powerpc/platforms/cell/spider-pic.c r.start = hard_coded_pics[chip]; start 346 arch/powerpc/platforms/cell/spider-pic.c spider_init_one(dn, chip++, r.start); start 201 arch/powerpc/platforms/cell/spu_manage.c *phys = resource.start; start 203 arch/powerpc/platforms/cell/spu_manage.c *virt = ioremap(resource.start, len); start 1739 arch/powerpc/platforms/cell/spufs/file.c static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 1742 arch/powerpc/platforms/cell/spufs/file.c int err = file_write_and_wait_range(file, start, end); start 146 arch/powerpc/platforms/chrp/pci.c Hydra = ioremap(r.start, resource_size(&r)); start 147 arch/powerpc/platforms/chrp/pci.c printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start); start 177 arch/powerpc/platforms/chrp/pci.c reg = ioremap(r.start + 0xf6000, 0x40); start 186 arch/powerpc/platforms/chrp/pci.c setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010, 0); start 256 arch/powerpc/platforms/chrp/pci.c printk(" at %llx", (unsigned long long)r.start); start 287 arch/powerpc/platforms/chrp/pci.c r.start + 0x000f8000, start 288 arch/powerpc/platforms/chrp/pci.c r.start + 0x000f8010, start 37 arch/powerpc/platforms/chrp/pegasos_eth.c .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS, start 57 arch/powerpc/platforms/chrp/pegasos_eth.c .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4, start 73 arch/powerpc/platforms/chrp/pegasos_eth.c .start = 9, start 401 arch/powerpc/platforms/chrp/setup.c opaddr = r.start; start 60 arch/powerpc/platforms/chrp/time.c base = r.start; start 145 arch/powerpc/platforms/embedded6xx/flipper-pic.c io_base = ioremap(res.start, resource_size(&res)); start 147 arch/powerpc/platforms/embedded6xx/flipper-pic.c pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); start 170 arch/powerpc/platforms/embedded6xx/hlwd-pic.c io_base = 
ioremap(res.start, resource_size(&res)); start 176 arch/powerpc/platforms/embedded6xx/hlwd-pic.c pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); start 90 arch/powerpc/platforms/embedded6xx/wii.c hw_regs = ioremap(res.start, resource_size(&res)); start 93 arch/powerpc/platforms/embedded6xx/wii.c res.start, hw_regs); start 232 arch/powerpc/platforms/fsl_uli1575.c dummy = ioremap(res->start, 0x4); start 84 arch/powerpc/platforms/maple/setup.c result = r.start; start 327 arch/powerpc/platforms/maple/setup.c mem = ioremap(r.start, resource_size(&r)); start 152 arch/powerpc/platforms/maple/time.c maple_rtc_addr = r.start; start 163 arch/powerpc/platforms/maple/time.c rtc_iores.start = maple_rtc_addr; start 103 arch/powerpc/platforms/pasemi/dma_lib.c int start, limit; start 107 arch/powerpc/platforms/pasemi/dma_lib.c start = 0; start 111 arch/powerpc/platforms/pasemi/dma_lib.c start = 10; start 115 arch/powerpc/platforms/pasemi/dma_lib.c start = 0; start 120 arch/powerpc/platforms/pasemi/dma_lib.c bit = find_next_bit(txch_free, MAX_TXCH, start); start 562 arch/powerpc/platforms/pasemi/dma_lib.c res.start = 0xfd800000; start 563 arch/powerpc/platforms/pasemi/dma_lib.c res.end = res.start + 0x1000; start 565 arch/powerpc/platforms/pasemi/dma_lib.c dma_status = ioremap_cache(res.start, resource_size(&res)); start 127 arch/powerpc/platforms/pasemi/pci.c pr_info("NEMO SB600 IOB base %08llx\n",res.start); start 129 arch/powerpc/platforms/pasemi/pci.c iob_mapbase = ioremap(res.start + 0x100, 0x94); start 76 arch/powerpc/platforms/pasemi/setup.c .start = 0x70, start 81 arch/powerpc/platforms/pasemi/setup.c .start = 8, start 348 arch/powerpc/platforms/powermac/bootx_init.c static unsigned long __init bootx_flatten_dt(unsigned long start) start 359 arch/powerpc/platforms/powermac/bootx_init.c mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4); start 587 arch/powerpc/platforms/powermac/nvram.c err = core99_nvram_setup(dp, r1.start); start 593 arch/powerpc/platforms/powermac/nvram.c nvram_data = ioremap(r1.start, s1); start 599 arch/powerpc/platforms/powermac/nvram.c nvram_data = ioremap(r1.start, s1); start 605 arch/powerpc/platforms/powermac/nvram.c nvram_addr = ioremap(r1.start, s1); start 606 arch/powerpc/platforms/powermac/nvram.c nvram_data = ioremap(r2.start, s2); start 208 arch/powerpc/platforms/powermac/pci.c hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); start 209 arch/powerpc/platforms/powermac/pci.c hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); start 605 arch/powerpc/platforms/powermac/pci.c hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); start 606 arch/powerpc/platforms/powermac/pci.c hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); start 616 arch/powerpc/platforms/powermac/pci.c hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); start 617 arch/powerpc/platforms/powermac/pci.c hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); start 619 arch/powerpc/platforms/powermac/pci.c return addr->start == 0xf2000000; start 689 arch/powerpc/platforms/powermac/pci.c hose->mem_resources[cur].start = base; start 720 arch/powerpc/platforms/powermac/pci.c hose->cfg_data = ioremap(cfg_res.start, 0x02000000); start 721 arch/powerpc/platforms/powermac/pci.c hose->cfg_addr = ioremap(self_res.start, resource_size(&self_res)); start 731 arch/powerpc/platforms/powermac/pci.c hose->io_resource.start = 0; start 839 arch/powerpc/platforms/powermac/pci.c disp_name, (unsigned long long)rsrc.start, hose->first_busno, start 1152 
arch/powerpc/platforms/powermac/pci.c dev->resource[i].start = dev->resource[i].end = 0; start 1162 arch/powerpc/platforms/powermac/pci.c dev->resource[i].start = dev->resource[i].end = 0; start 1208 arch/powerpc/platforms/powermac/pci.c if (r->start >= 0xf0000000 && r->start < 0xf3000000) start 1224 arch/powerpc/platforms/powermac/pci.c reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000); start 353 arch/powerpc/platforms/powermac/pic.c addr = (u8 __iomem *) ioremap(r.start, 0x40); start 367 arch/powerpc/platforms/powermac/pic.c addr = (u8 __iomem *)ioremap(r.start, 0x40); start 335 arch/powerpc/platforms/powermac/smp.c unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; start 355 arch/powerpc/platforms/powermac/smp.c out_be32(psurge_start, start); start 186 arch/powerpc/platforms/powermac/time.c via = ioremap(rsrc.start, resource_size(&rsrc)); start 26 arch/powerpc/platforms/powernv/memtrace.c u64 start; start 73 arch/powerpc/platforms/powernv/memtrace.c const unsigned long start = PFN_PHYS(start_pfn); start 76 arch/powerpc/platforms/powernv/memtrace.c if (walk_memory_blocks(start, size, NULL, check_memblock_online)) start 79 arch/powerpc/platforms/powernv/memtrace.c walk_memory_blocks(start, size, (void *)MEM_GOING_OFFLINE, start 83 arch/powerpc/platforms/powernv/memtrace.c walk_memory_blocks(start, size, (void *)MEM_ONLINE, start 88 arch/powerpc/platforms/powernv/memtrace.c walk_memory_blocks(start, size, (void *)MEM_OFFLINE, start 159 arch/powerpc/platforms/powernv/memtrace.c memtrace_array[memtrace_array_nr].start = m; start 179 arch/powerpc/platforms/powernv/memtrace.c ent->mem = ioremap(ent->start, ent->size); start 183 arch/powerpc/platforms/powernv/memtrace.c ent->start); start 198 arch/powerpc/platforms/powernv/memtrace.c debugfs_create_x64("start", 0400, dir, &ent->start); start 232 arch/powerpc/platforms/powernv/memtrace.c if (add_memory(ent->nid, ent->start, ent->size)) { start 245 arch/powerpc/platforms/powernv/memtrace.c walk_memory_blocks(ent->start, ent->size, NULL, start 256 arch/powerpc/platforms/powernv/memtrace.c ent->size = ent->start = ent->nid = NUMA_NO_NODE; start 22 arch/powerpc/platforms/powernv/ocxl.c u16 start; start 238 arch/powerpc/platforms/powernv/ocxl.c link->fn_actags[i].start = range_start; start 245 arch/powerpc/platforms/powernv/ocxl.c link->fn_actags[i].start, link->fn_actags[i].count, start 273 arch/powerpc/platforms/powernv/ocxl.c *base = link->fn_actags[PCI_FUNC(dev->devfn)].start; start 165 arch/powerpc/platforms/powernv/opal-irqchip.c if (!opal_irqs || !opal_irqs[i].start) start 169 arch/powerpc/platforms/powernv/opal-irqchip.c disable_irq_nosync(opal_irqs[i].start); start 171 arch/powerpc/platforms/powernv/opal-irqchip.c free_irq(opal_irqs[i].start, NULL); start 173 arch/powerpc/platforms/powernv/opal-irqchip.c opal_irqs[i].start = 0; start 254 arch/powerpc/platforms/powernv/opal-irqchip.c r->start = r->end = virq; start 282 arch/powerpc/platforms/powernv/opal-irqchip.c rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, start 285 arch/powerpc/platforms/powernv/opal-irqchip.c pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); start 126 arch/powerpc/platforms/powernv/pci-ioda.c return (r->start >= phb->ioda.m64_base && start 127 arch/powerpc/platforms/powernv/pci-ioda.c r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); start 232 arch/powerpc/platforms/powernv/pci-ioda.c r->start += (2 * phb->ioda.m64_segsize); start 257 arch/powerpc/platforms/powernv/pci-ioda.c resource_size_t 
base, sgsz, start, end; start 267 arch/powerpc/platforms/powernv/pci-ioda.c start = _ALIGN_DOWN(r->start - base, sgsz); start 269 arch/powerpc/platforms/powernv/pci-ioda.c for (segno = start / sgsz; segno < end / sgsz; segno++) { start 319 arch/powerpc/platforms/powernv/pci-ioda.c r->start += (2 * phb->ioda.m64_segsize); start 483 arch/powerpc/platforms/powernv/pci-ioda.c res->start = of_translate_address(dn, r + 2); start 484 arch/powerpc/platforms/powernv/pci-ioda.c res->end = res->start + of_read_number(r + 4, 2) - 1; start 487 arch/powerpc/platforms/powernv/pci-ioda.c hose->mem_offset[1] = res->start - pci_addr; start 495 arch/powerpc/platforms/powernv/pci-ioda.c res->start, res->end, pci_addr, m64_range[0], start 795 arch/powerpc/platforms/powernv/pci-ioda.c count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; start 877 arch/powerpc/platforms/powernv/pci-ioda.c count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; start 992 arch/powerpc/platforms/powernv/pci-ioda.c res2.start = res->start + (size * offset); start 993 arch/powerpc/platforms/powernv/pci-ioda.c res2.end = res2.start + (size * num_vfs) - 1; start 1017 arch/powerpc/platforms/powernv/pci-ioda.c res->start += size * offset; start 1031 arch/powerpc/platforms/powernv/pci-ioda.c pdn->holes[i].start = res2.start; start 1032 arch/powerpc/platforms/powernv/pci-ioda.c pdn->holes[i].end = res2.start + size * offset - 1; start 1172 arch/powerpc/platforms/powernv/pci-ioda.c pe->rid = bus->busn_res.start << 8; start 1176 arch/powerpc/platforms/powernv/pci-ioda.c &bus->busn_res.start, &bus->busn_res.end, start 1180 arch/powerpc/platforms/powernv/pci-ioda.c &bus->busn_res.start, pe->pe_number); start 1345 arch/powerpc/platforms/powernv/pci-ioda.c resource_size_t size, start; start 1390 arch/powerpc/platforms/powernv/pci-ioda.c start = res->start + size * j; start 1393 arch/powerpc/platforms/powernv/pci-ioda.c start = res->start; start 1407 arch/powerpc/platforms/powernv/pci-ioda.c start, start 1910 arch/powerpc/platforms/powernv/pci-ioda.c unsigned long start, end, inc; start 1912 arch/powerpc/platforms/powernv/pci-ioda.c start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); start 1917 arch/powerpc/platforms/powernv/pci-ioda.c start |= (1ull << 63); start 1923 arch/powerpc/platforms/powernv/pci-ioda.c while (start <= end) { start 1925 arch/powerpc/platforms/powernv/pci-ioda.c __raw_rm_writeq_be(start, invalidate); start 1927 arch/powerpc/platforms/powernv/pci-ioda.c __raw_writeq_be(start, invalidate); start 1929 arch/powerpc/platforms/powernv/pci-ioda.c start += inc; start 2012 arch/powerpc/platforms/powernv/pci-ioda.c unsigned long start, end, inc; start 2015 arch/powerpc/platforms/powernv/pci-ioda.c start = PHB3_TCE_KILL_INVAL_ONE; start 2016 arch/powerpc/platforms/powernv/pci-ioda.c start |= (pe->pe_number & 0xFF); start 2017 arch/powerpc/platforms/powernv/pci-ioda.c end = start; start 2020 arch/powerpc/platforms/powernv/pci-ioda.c start |= (index << shift); start 2025 arch/powerpc/platforms/powernv/pci-ioda.c while (start <= end) { start 2027 arch/powerpc/platforms/powernv/pci-ioda.c __raw_rm_writeq_be(start, invalidate); start 2029 arch/powerpc/platforms/powernv/pci-ioda.c __raw_writeq_be(start, invalidate); start 2030 arch/powerpc/platforms/powernv/pci-ioda.c start += inc; start 2960 arch/powerpc/platforms/powernv/pci-ioda.c res->end = res->start + size * mul - 1; start 2974 arch/powerpc/platforms/powernv/pci-ioda.c res->end = res->start - 1; start 3011 arch/powerpc/platforms/powernv/pci-ioda.c if 
(!res || !res->flags || res->start > res->end) start 3015 arch/powerpc/platforms/powernv/pci-ioda.c region.start = res->start - phb->ioda.io_pci_base; start 3017 arch/powerpc/platforms/powernv/pci-ioda.c index = region.start / phb->ioda.io_segsize; start 3020 arch/powerpc/platforms/powernv/pci-ioda.c region.start <= region.end) { start 3030 arch/powerpc/platforms/powernv/pci-ioda.c region.start += phb->ioda.io_segsize; start 3035 arch/powerpc/platforms/powernv/pci-ioda.c region.start = res->start - start 3041 arch/powerpc/platforms/powernv/pci-ioda.c index = region.start / phb->ioda.m32_segsize; start 3044 arch/powerpc/platforms/powernv/pci-ioda.c region.start <= region.end) { start 3054 arch/powerpc/platforms/powernv/pci-ioda.c region.start += phb->ioda.m32_segsize; start 3286 arch/powerpc/platforms/powernv/pci-ioda.c r->start = w->start; start 3749 arch/powerpc/platforms/powernv/pci-ioda.c phb->regs_phys = r.start; start 3750 arch/powerpc/platforms/powernv/pci-ioda.c phb->regs = ioremap(r.start, resource_size(&r)); start 3776 arch/powerpc/platforms/powernv/pci-ioda.c phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0]; start 149 arch/powerpc/platforms/powernv/rng.c rng->regs_real = (void __iomem *)res.start; start 44 arch/powerpc/platforms/powernv/vas-window.c u64 *start, int *len) start 49 arch/powerpc/platforms/powernv/vas-window.c *start = pbaddr + window->winid * VAS_HVWC_SIZE; start 54 arch/powerpc/platforms/powernv/vas-window.c u64 *start, int *len) start 59 arch/powerpc/platforms/powernv/vas-window.c *start = pbaddr + window->winid * VAS_UWC_SIZE; start 73 arch/powerpc/platforms/powernv/vas-window.c u64 start; start 81 arch/powerpc/platforms/powernv/vas-window.c compute_paste_address(txwin, &start, &len); start 83 arch/powerpc/platforms/powernv/vas-window.c if (!request_mem_region(start, len, name)) { start 85 arch/powerpc/platforms/powernv/vas-window.c __func__, start, len); start 89 arch/powerpc/platforms/powernv/vas-window.c map = ioremap_cache(start, len); start 92 arch/powerpc/platforms/powernv/vas-window.c start, len); start 96 arch/powerpc/platforms/powernv/vas-window.c pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map); start 104 arch/powerpc/platforms/powernv/vas-window.c static void *map_mmio_region(char *name, u64 start, int len) start 108 arch/powerpc/platforms/powernv/vas-window.c if (!request_mem_region(start, len, name)) { start 110 arch/powerpc/platforms/powernv/vas-window.c __func__, start, len); start 114 arch/powerpc/platforms/powernv/vas-window.c map = ioremap(start, len); start 116 arch/powerpc/platforms/powernv/vas-window.c pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start, start 124 arch/powerpc/platforms/powernv/vas-window.c static void unmap_region(void *addr, u64 start, int len) start 127 arch/powerpc/platforms/powernv/vas-window.c release_mem_region((phys_addr_t)start, len); start 190 arch/powerpc/platforms/powernv/vas-window.c u64 start; start 192 arch/powerpc/platforms/powernv/vas-window.c get_hvwc_mmio_bar(window, &start, &len); start 193 arch/powerpc/platforms/powernv/vas-window.c window->hvwc_map = map_mmio_region("HVWCM_Window", start, len); start 195 arch/powerpc/platforms/powernv/vas-window.c get_uwc_mmio_bar(window, &start, &len); start 196 arch/powerpc/platforms/powernv/vas-window.c window->uwc_map = map_mmio_region("UWCM_Window", start, len); start 56 arch/powerpc/platforms/powernv/vas.c vinst->hvwc_bar_start = res->start; start 59 arch/powerpc/platforms/powernv/vas.c vinst->uwc_bar_start = 
res->start; start 62 arch/powerpc/platforms/powernv/vas.c vinst->paste_base_addr = res->start; start 371 arch/powerpc/platforms/ps3/device-init.c u64 start, size; start 375 arch/powerpc/platforms/ps3/device-init.c i, &id, &start, start 385 arch/powerpc/platforms/ps3/device-init.c __func__, __LINE__, i, id, start, size); start 388 arch/powerpc/platforms/ps3/device-init.c p->regions[i].start = start; start 72 arch/powerpc/platforms/ps3/gelic_udbg.c static void map_dma_mem(int bus_id, int dev_id, void *start, size_t len, start 76 arch/powerpc/platforms/ps3/gelic_udbg.c u64 real_addr = ((u64)start) & 0x0fffffffffffffffUL; start 500 arch/powerpc/platforms/pseries/cmm.c unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn); start 501 arch/powerpc/platforms/pseries/cmm.c unsigned long end = start + (marg->nr_pages << PAGE_SHIFT); start 507 arch/powerpc/platforms/pseries/cmm.c if ((unsigned long)pa >= start && (unsigned long)pa < end) start 510 arch/powerpc/platforms/pseries/cmm.c if (pa->page[idx] >= start && pa->page[idx] < end) start 225 arch/powerpc/platforms/pseries/hotplug-memory.c struct drmem_lmb *lmb, *start, *end; start 228 arch/powerpc/platforms/pseries/hotplug-memory.c start = NULL; start 231 arch/powerpc/platforms/pseries/hotplug-memory.c start = lmb; start 236 arch/powerpc/platforms/pseries/hotplug-memory.c if (!start) start 239 arch/powerpc/platforms/pseries/hotplug-memory.c end = &start[n_lmbs]; start 245 arch/powerpc/platforms/pseries/hotplug-memory.c *start_lmb = start; start 74 arch/powerpc/platforms/pseries/hvCall_inst.c .start = hc_start, start 949 arch/powerpc/platforms/pseries/iommu.c unsigned long start, size; start 960 arch/powerpc/platforms/pseries/iommu.c start = of_read_number(memcell_buf, n_mem_addr_cells); start 965 arch/powerpc/platforms/pseries/iommu.c max_addr = max_t(phys_addr_t, max_addr, start + size); start 366 arch/powerpc/platforms/pseries/papr_scm.c mapping.start = 0; start 478 arch/powerpc/platforms/pseries/papr_scm.c p->res.start = p->bound_addr; start 128 arch/powerpc/platforms/pseries/pci.c cpu_to_be64(res->start + size * vf_index); start 284 arch/powerpc/platforms/pseries/pci.c if (dev->resource[i].start == 0 && dev->resource[i].end) { start 635 arch/powerpc/platforms/pseries/setup.c res->start = base; start 658 arch/powerpc/platforms/pseries/setup.c res->start = base; start 142 arch/powerpc/sysdev/dart_iommu.c unsigned long start = (unsigned long)base; start 143 arch/powerpc/sysdev/dart_iommu.c unsigned long end = start + (count + 1) * sizeof(unsigned int); start 147 arch/powerpc/sysdev/dart_iommu.c flush_dcache_range(start, end); start 289 arch/powerpc/sysdev/dart_iommu.c dart = ioremap(r.start, resource_size(&r)); start 443 arch/powerpc/sysdev/fsl_msi.c msi->msi_regs = ioremap(res.start, resource_size(&res)); start 451 arch/powerpc/sysdev/fsl_msi.c features->msiir_offset + (res.start & 0xfffff); start 459 arch/powerpc/sysdev/fsl_msi.c (res.start & MSIIR_OFFSET_MASK); start 461 arch/powerpc/sysdev/fsl_msi.c msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK; start 147 arch/powerpc/sysdev/fsl_pci.c resource_size_t pci_addr = res->start - offset; start 148 arch/powerpc/sysdev/fsl_pci.c resource_size_t phys_addr = res->start; start 154 arch/powerpc/sysdev/fsl_pci.c (u64)res->start, (u64)size); start 250 arch/powerpc/sysdev/fsl_pci.c paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start); start 271 arch/powerpc/sysdev/fsl_pci.c (u64)hose->io_resource.start, start 274 arch/powerpc/sysdev/fsl_pci.c out_be32(&pci->pow[j].potar, 
(hose->io_resource.start >> 12)); start 280 arch/powerpc/sysdev/fsl_pci.c - hose->io_resource.start + 1) - 1)); start 509 arch/powerpc/sysdev/fsl_pci.c res->start = par ? par->start : 0; start 560 arch/powerpc/sysdev/fsl_pci.c (u64)rsrc.start, (u64)resource_size(&rsrc)); start 562 arch/powerpc/sysdev/fsl_pci.c pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc)); start 566 arch/powerpc/sysdev/fsl_pci.c setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, start 621 arch/powerpc/sysdev/fsl_pci.c (unsigned long long)rsrc.start, hose->first_busno, start 760 arch/powerpc/sysdev/fsl_pci.c pcie->cfg_type0 = ioremap(reg->start, resource_size(reg)); start 831 arch/powerpc/sysdev/fsl_pci.c if ((rsrc_reg.start & 0xfffff) == 0x8500) start 832 arch/powerpc/sysdev/fsl_pci.c rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300; start 833 arch/powerpc/sysdev/fsl_pci.c else if ((rsrc_reg.start & 0xfffff) == 0x8600) start 834 arch/powerpc/sysdev/fsl_pci.c rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380; start 839 arch/powerpc/sysdev/fsl_pci.c if ((rsrc_reg.start & 0xfffff) == 0x8500) start 864 arch/powerpc/sysdev/fsl_pci.c setup_indirect_pci(hose, rsrc_cfg.start, start 865 arch/powerpc/sysdev/fsl_pci.c rsrc_cfg.start + 4, 0); start 870 arch/powerpc/sysdev/fsl_pci.c (unsigned long long)rsrc_reg.start, hose->first_busno, start 1044 arch/powerpc/sysdev/fsl_pci.c addr >= res->start && addr <= res->end) start 470 arch/powerpc/sysdev/fsl_rio.c rio_regs_win = ioremap(regs.start, resource_size(&regs)); start 510 arch/powerpc/sysdev/fsl_rio.c rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); start 634 arch/powerpc/sysdev/fsl_rio.c port->iores.start = range_start; start 635 arch/powerpc/sysdev/fsl_rio.c port->iores.end = port->iores.start + range_size - 1; start 642 arch/powerpc/sysdev/fsl_rio.c (u64)port->iores.start, (u64)port->iores.end); start 714 arch/powerpc/sysdev/fsl_rio.c port->iores.start >> 12); start 718 arch/powerpc/sysdev/fsl_rio.c priv->maint_win = ioremap(port->iores.start, start 329 arch/powerpc/sysdev/fsl_rmu.c if ((dbell->res->start start 142 arch/powerpc/sysdev/i8259.c .start = 0x20, start 149 arch/powerpc/sysdev/i8259.c .start = 0xa0, start 156 arch/powerpc/sysdev/i8259.c .start = 0x4d0, start 720 arch/powerpc/sysdev/ipic.c ipic->regs = ioremap(res.start, resource_size(&res)); start 118 arch/powerpc/sysdev/mmio_nvram.c nvram_addr = r.start; start 1256 arch/powerpc/sysdev/mpic.c phys_addr = r.start; start 194 arch/powerpc/sysdev/mpic_msgr.c msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); start 52 arch/powerpc/sysdev/of_rtc.c (unsigned long long)res->start, start 43 arch/powerpc/sysdev/rtc_cmos_setup.c if (res[0].start != RTC_PORT(0)) start 56 arch/powerpc/sysdev/rtc_cmos_setup.c res[1].start = 8; start 89 arch/powerpc/sysdev/tsi108_dev.c r[1].start = irq_of_parse_and_map(np, 0); start 125 arch/powerpc/sysdev/tsi108_dev.c tsi_eth_data.regs = r[0].start; start 126 arch/powerpc/sysdev/tsi108_dev.c tsi_eth_data.phyregs = res.start; start 221 arch/powerpc/sysdev/tsi108_pci.c rsrc.start, hose->first_busno, hose->last_busno); start 307 arch/powerpc/sysdev/xics/icp-native.c if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r))) start 57 arch/powerpc/sysdev/xilinx_pci.c dev->resource[i].start = 0; start 105 arch/powerpc/sysdev/xilinx_pci.c setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, start 106 arch/powerpc/sysdev/xilinx_pci.c r.start + XPLB_PCI_DATA, start 576 arch/powerpc/sysdev/xive/native.c tima = ioremap(r.start, resource_size(&r)); 
start 599 arch/powerpc/sysdev/xive/native.c kvmppc_set_xive_tima(cpu, r.start, tima); start 607 arch/powerpc/sysdev/xive/native.c xive_tima_os = r.start; start 803 arch/powerpc/sysdev/xive/spapr.c tima = ioremap(r.start, resource_size(&r)); start 30 arch/riscv/include/asm/cacheflush.h unsigned long start, start 54 arch/riscv/include/asm/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) start 58 arch/riscv/include/asm/cacheflush.h static inline void flush_cache_vunmap(unsigned long start, unsigned long end) start 87 arch/riscv/include/asm/cacheflush.h #define flush_icache_range(start, end) flush_icache_all() start 83 arch/riscv/include/asm/sbi.h unsigned long start, start 86 arch/riscv/include/asm/sbi.h SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size); start 90 arch/riscv/include/asm/sbi.h unsigned long start, start 94 arch/riscv/include/asm/sbi.h SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid); start 28 arch/riscv/include/asm/tlbflush.h void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 35 arch/riscv/include/asm/tlbflush.h unsigned long start, unsigned long end) start 44 arch/riscv/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, start 150 arch/riscv/kernel/cpu.c .start = c_start, start 320 arch/riscv/kernel/perf_event.c riscv_pmu->pmu->start(event, PERF_EF_RELOAD); start 442 arch/riscv/kernel/perf_event.c .start = riscv_pmu_start, start 57 arch/riscv/kernel/sys_riscv.c SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, start 413 arch/riscv/mm/init.c phys_addr_t pa, start, end; start 426 arch/riscv/mm/init.c start = reg->base; start 427 arch/riscv/mm/init.c end = start + reg->size; start 429 arch/riscv/mm/init.c if (start >= end) start 433 arch/riscv/mm/init.c if (start <= __pa(PAGE_OFFSET) && start 435 arch/riscv/mm/init.c start = __pa(PAGE_OFFSET); start 437 arch/riscv/mm/init.c map_size = best_map_size(start, end - start); start 438 arch/riscv/mm/init.c for (pa = start; pa < end; pa += map_size) { start 464 arch/riscv/mm/init.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 467 arch/riscv/mm/init.c return vmemmap_populate_basepages(start, end, node); start 158 arch/riscv/mm/sifive_l2_cache.c l2_base = ioremap(res.start, resource_size(&res)); start 12 arch/riscv/mm/tlbflush.c static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start, start 18 arch/riscv/mm/tlbflush.c sbi_remote_sfence_vma(hmask.bits, start, size); start 31 arch/riscv/mm/tlbflush.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 34 arch/riscv/mm/tlbflush.c __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start); start 1546 arch/riscv/net/bpf_jit_comp.c static void bpf_flush_icache(void *start, void *end) start 1548 arch/riscv/net/bpf_jit_comp.c flush_icache_range((unsigned long)start, (unsigned long)end); start 95 arch/s390/boot/kaslr.c unsigned long base, start, end, kernel_size; start 130 arch/s390/boot/kaslr.c for_each_mem_detect_block(i, &start, &end) { start 132 arch/s390/boot/kaslr.c if (start >= memory_limit) start 137 arch/s390/boot/kaslr.c if (end - start < kernel_size) start 139 arch/s390/boot/kaslr.c block_sum += end - start - kernel_size; start 152 arch/s390/boot/kaslr.c for_each_mem_detect_block(i, &start, &end) { start 154 arch/s390/boot/kaslr.c if (start >= memory_limit) start 159 arch/s390/boot/kaslr.c if (end - start < kernel_size) start 161 
arch/s390/boot/kaslr.c block_sum += end - start - kernel_size; start 163 arch/s390/boot/kaslr.c base = start + base - offset; start 48 arch/s390/boot/mem_detect.c void add_mem_detect_block(u64 start, u64 end) start 54 arch/s390/boot/mem_detect.c if (block->end == start) { start 61 arch/s390/boot/mem_detect.c block->start = start; start 99 arch/s390/boot/mem_detect.c unsigned long start; start 109 arch/s390/boot/mem_detect.c add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1); start 54 arch/s390/include/asm/airq.h unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, start 11 arch/s390/include/asm/alternative-asm.h .macro alt_len_check start,end start 20 arch/s390/include/asm/alternative.h void apply_alternatives(struct alt_instr *start, struct alt_instr *end); start 92 arch/s390/include/asm/gmap.h void (*notifier_call)(struct gmap *gmap, unsigned long start, start 142 arch/s390/include/asm/gmap.h int gmap_mprotect_notify(struct gmap *, unsigned long start, start 16 arch/s390/include/asm/mem_detect.h u64 start; start 38 arch/s390/include/asm/mem_detect.h void add_mem_detect_block(u64 start, u64 end); start 40 arch/s390/include/asm/mem_detect.h static inline int __get_mem_detect_block(u32 n, unsigned long *start, start 44 arch/s390/include/asm/mem_detect.h *start = 0; start 50 arch/s390/include/asm/mem_detect.h *start = (unsigned long)mem_detect.entries[n].start; start 53 arch/s390/include/asm/mem_detect.h *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start; start 72 arch/s390/include/asm/mem_detect.h static inline void get_mem_detect_reserved(unsigned long *start, start 75 arch/s390/include/asm/mem_detect.h *start = (unsigned long)mem_detect.entries_extended; start 84 arch/s390/include/asm/mem_detect.h unsigned long start; start 88 arch/s390/include/asm/mem_detect.h __get_mem_detect_block(mem_detect.count - 1, &start, &end); start 13 arch/s390/include/asm/nospec-branch.h void nospec_revert(s32 *start, s32 *end); start 41 arch/s390/include/asm/page.h void __storage_key_init_range(unsigned long start, unsigned long end); start 43 arch/s390/include/asm/page.h static inline void storage_key_init_range(unsigned long start, unsigned long end) start 46 arch/s390/include/asm/page.h __storage_key_init_range(start, end); start 1281 arch/s390/include/asm/pgtable.h static inline bool gup_fast_permitted(unsigned long start, unsigned long end) start 1682 arch/s390/include/asm/pgtable.h extern int vmem_add_mapping(unsigned long start, unsigned long size); start 1683 arch/s390/include/asm/pgtable.h extern int vmem_remove_mapping(unsigned long start, unsigned long size); start 97 arch/s390/include/asm/ptrace.h unsigned long start; /* PER starting address */ start 124 arch/s390/include/asm/tlbflush.h unsigned long start, unsigned long end) start 129 arch/s390/include/asm/tlbflush.h static inline void flush_tlb_kernel_range(unsigned long start, start 62 arch/s390/kernel/alternative.c static void __init_or_module __apply_alternatives(struct alt_instr *start, start 73 arch/s390/kernel/alternative.c for (a = start; a < end; a++) { start 102 arch/s390/kernel/alternative.c void __init_or_module apply_alternatives(struct alt_instr *start, start 106 arch/s390/kernel/alternative.c __apply_alternatives(start, end); start 205 arch/s390/kernel/asm-offsets.c OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start); start 563 arch/s390/kernel/crash_dump.c phys_addr_t start, end; start 567 arch/s390/kernel/crash_dump.c MEMBLOCK_NONE, &start, &end, NULL) { 
start 568 arch/s390/kernel/crash_dump.c phdr->p_filesz = end - start; start 570 arch/s390/kernel/crash_dump.c phdr->p_offset = start; start 571 arch/s390/kernel/crash_dump.c phdr->p_vaddr = start; start 572 arch/s390/kernel/crash_dump.c phdr->p_paddr = start; start 573 arch/s390/kernel/crash_dump.c phdr->p_memsz = end - start; start 101 arch/s390/kernel/diag.c .start = show_diag_stat_start, start 492 arch/s390/kernel/dis.c int start, end, opsize, hops, i; start 497 arch/s390/kernel/dis.c for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { start 498 arch/s390/kernel/dis.c addr = regs->psw.addr - 34 + start; start 499 arch/s390/kernel/dis.c if (__copy_from_user(code + start - 2, start 511 arch/s390/kernel/dis.c if ((regs->psw.addr & 1) || start >= end) { start 516 arch/s390/kernel/dis.c while (start < 32) { start 517 arch/s390/kernel/dis.c for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { start 518 arch/s390/kernel/dis.c if (!find_insn(code + start + i)) start 520 arch/s390/kernel/dis.c i += insn_length(code[start + i]); start 522 arch/s390/kernel/dis.c if (start + i == 32) start 525 arch/s390/kernel/dis.c start += 2; start 531 arch/s390/kernel/dis.c while (start < end && hops < 8) { start 532 arch/s390/kernel/dis.c opsize = insn_length(code[start]); start 533 arch/s390/kernel/dis.c if (start + opsize == 32) start 535 arch/s390/kernel/dis.c else if (start == 32) start 539 arch/s390/kernel/dis.c addr = regs->psw.addr + start - 32; start 541 arch/s390/kernel/dis.c if (start + opsize >= end) start 544 arch/s390/kernel/dis.c ptr += sprintf(ptr, "%02x", code[start + i]); start 548 arch/s390/kernel/dis.c ptr += print_insn(ptr, code + start, addr); start 549 arch/s390/kernel/dis.c start += opsize; start 44 arch/s390/kernel/kexec_elf.c buf.mem += crashk_res.start; start 28 arch/s390/kernel/kexec_image.c buf.mem += crashk_res.start; start 208 arch/s390/kernel/kprobes.c per_kprobe.start = ip; start 95 arch/s390/kernel/machine_kexec.c start_kdump = (void *)((struct kimage *) image)->start; start 148 arch/s390/kernel/machine_kexec.c int (*start_kdump)(int) = (void *)image->start; start 184 arch/s390/kernel/machine_kexec.c size = begin - crashk_res.start; start 186 arch/s390/kernel/machine_kexec.c os_info_crashkernel_add(crashk_res.start, size); start 199 arch/s390/kernel/machine_kexec.c set_memory_ro(crashk_res.start, size >> PAGE_SHIFT); start 201 arch/s390/kernel/machine_kexec.c set_memory_rw(crashk_res.start, size >> PAGE_SHIFT); start 223 arch/s390/kernel/machine_kexec.c diag10_range(PFN_DOWN(crashk_res.start), start 224 arch/s390/kernel/machine_kexec.c PFN_DOWN(crashk_res.end - crashk_res.start + 1)); start 287 arch/s390/kernel/machine_kexec.c (*data_mover)(&image->head, image->start); start 101 arch/s390/kernel/machine_kexec_file.c &crashk_res.start, start 102 arch/s390/kernel/machine_kexec_file.c sizeof(crashk_res.start), start 107 arch/s390/kernel/machine_kexec_file.c crash_size = crashk_res.end - crashk_res.start + 1; start 127 arch/s390/kernel/machine_kexec_file.c buf.mem += crashk_res.start; start 151 arch/s390/kernel/machine_kexec_file.c buf.mem += crashk_res.start; start 179 arch/s390/kernel/machine_kexec_file.c buf.mem += crashk_res.start; start 238 arch/s390/kernel/machine_kexec_file.c data.parm->oldmem_base = crashk_res.start; start 239 arch/s390/kernel/machine_kexec_file.c data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1; start 254 arch/s390/kernel/machine_kexec_file.c restart_psw += image->start; start 256 arch/s390/kernel/machine_kexec_file.c 
image->start = 0; start 99 arch/s390/kernel/nospec-branch.c static void __init_or_module __nospec_revert(s32 *start, s32 *end) start 107 arch/s390/kernel/nospec-branch.c for (epo = start; epo < end; epo++) { start 162 arch/s390/kernel/nospec-branch.c void __init_or_module nospec_revert(s32 *start, s32 *end) start 165 arch/s390/kernel/nospec-branch.c __nospec_revert(start, end); start 539 arch/s390/kernel/perf_cpum_cf.c .start = cpumf_pmu_start, start 34 arch/s390/kernel/perf_cpum_cf_diag.c unsigned char start[PAGE_SIZE]; /* Counter set at event start */ start 451 arch/s390/kernel/perf_cpum_cf_diag.c ctrstart = (struct cf_ctrset_entry *)(csd->start + offset); start 473 arch/s390/kernel/perf_cpum_cf_diag.c trailer_start = (struct cf_trailer_entry *)(csd->start + offset); start 538 arch/s390/kernel/perf_cpum_cf_diag.c csd->used = cf_diag_getctr(csd->start, sizeof(csd->start), start 638 arch/s390/kernel/perf_cpum_cf_diag.c .start = cf_diag_start, start 682 arch/s390/kernel/perf_cpum_cf_diag.c if (need > sizeof(((struct cf_diag_csd *)0)->start)) { start 1345 arch/s390/kernel/perf_cpum_sf.c #define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0) start 2016 arch/s390/kernel/perf_cpum_sf.c .start = cpumsf_pmu_start, start 206 arch/s390/kernel/processor.c .start = c_start, start 88 arch/s390/kernel/ptrace.c new.start = thread->per_user.start; start 102 arch/s390/kernel/ptrace.c new.start = 0; start 163 arch/s390/kernel/ptrace.c 0 : child->thread.per_user.start; start 174 arch/s390/kernel/ptrace.c return child->thread.per_user.start; start 321 arch/s390/kernel/ptrace.c child->thread.per_user.start = data; start 548 arch/s390/kernel/ptrace.c 0 : child->thread.per_user.start; start 559 arch/s390/kernel/ptrace.c return (__u32) child->thread.per_user.start; start 680 arch/s390/kernel/ptrace.c child->thread.per_user.start = data; start 505 arch/s390/kernel/setup.c code_resource.start = (unsigned long) _text; start 507 arch/s390/kernel/setup.c data_resource.start = (unsigned long) _etext; start 509 arch/s390/kernel/setup.c bss_resource.start = (unsigned long) __bss_start; start 520 arch/s390/kernel/setup.c res->start = reg->base; start 526 arch/s390/kernel/setup.c if (std_res->start < res->start || start 527 arch/s390/kernel/setup.c std_res->start > res->end) start 536 arch/s390/kernel/setup.c std_res->start = res->end + 1; start 552 arch/s390/kernel/setup.c memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0); start 553 arch/s390/kernel/setup.c memblock_reserve(crashk_res.start, resource_size(&crashk_res)); start 623 arch/s390/kernel/setup.c if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) start 722 arch/s390/kernel/setup.c crashk_res.start = crash_base; start 758 arch/s390/kernel/setup.c unsigned long start, size; start 760 arch/s390/kernel/setup.c get_mem_detect_reserved(&start, &size); start 762 arch/s390/kernel/setup.c memblock_reserve(start, size); start 767 arch/s390/kernel/setup.c unsigned long start, size; start 769 arch/s390/kernel/setup.c get_mem_detect_reserved(&start, &size); start 771 arch/s390/kernel/setup.c memblock_free(start, size); start 774 arch/s390/kernel/setup.c static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size) start 777 arch/s390/kernel/setup.c start, start + size - 1); start 778 arch/s390/kernel/setup.c memblock_add_range(&memblock.memory, start, size, 0, 0); start 779 arch/s390/kernel/setup.c memblock_add_range(&memblock.physmem, start, size, 0, 0); start 799 arch/s390/kernel/setup.c unsigned long 
start, end; start 806 arch/s390/kernel/setup.c for_each_mem_detect_block(i, &start, &end) start 807 arch/s390/kernel/setup.c memblock_physmem_add(start, end - start); start 375 arch/s390/kernel/sysinfo.c .start = service_level_start, start 71 arch/s390/kernel/uprobes.c regs->psw.addr >= current->thread.per_user.start && start 256 arch/s390/kernel/uprobes.c if ((void *)current->thread.per_user.start > (addr + len)) start 23 arch/s390/kvm/diag.c unsigned long start, end; start 26 arch/s390/kvm/diag.c start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; start 30 arch/s390/kvm/diag.c if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end start 31 arch/s390/kvm/diag.c || start < 2 * PAGE_SIZE) start 34 arch/s390/kvm/diag.c VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); start 40 arch/s390/kvm/diag.c if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) { start 41 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, start, end); start 49 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, start, prefix); start 50 arch/s390/kvm/diag.c if (start <= prefix) start 19 arch/s390/kvm/guestdbg.c static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len) start 31 arch/s390/kvm/guestdbg.c if ((*start == 0) && (*stop == 0)) { start 32 arch/s390/kvm/guestdbg.c *start = estart; start 34 arch/s390/kvm/guestdbg.c } else if (*start <= *stop) { start 36 arch/s390/kvm/guestdbg.c if (estart < *start) start 37 arch/s390/kvm/guestdbg.c *start = estart; start 45 arch/s390/kvm/guestdbg.c } else if (estop > *start) { start 46 arch/s390/kvm/guestdbg.c if (estart < *start) start 47 arch/s390/kvm/guestdbg.c *start = estart; start 50 arch/s390/kvm/guestdbg.c else if ((estop - *stop) < (*start - estart)) start 53 arch/s390/kvm/guestdbg.c *start = estart; start 61 arch/s390/kvm/guestdbg.c unsigned long start, len; start 80 arch/s390/kvm/guestdbg.c start = vcpu->arch.guestdbg.hw_bp_info[i].addr; start 87 arch/s390/kvm/guestdbg.c if (start < MAX_INST_SIZE) { start 88 arch/s390/kvm/guestdbg.c len += start; start 89 arch/s390/kvm/guestdbg.c start = 0; start 91 arch/s390/kvm/guestdbg.c start -= MAX_INST_SIZE; start 95 arch/s390/kvm/guestdbg.c extend_address_range(cr10, cr11, start, len); start 101 arch/s390/kvm/guestdbg.c unsigned long start, len; start 122 arch/s390/kvm/guestdbg.c start = vcpu->arch.guestdbg.hw_wp_info[i].addr; start 125 arch/s390/kvm/guestdbg.c extend_address_range(cr10, cr11, start, len); start 235 arch/s390/kvm/kvm-s390.c static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, start 1918 arch/s390/kvm/kvm-s390.c int start = 0, end = slots->used_slots; start 1926 arch/s390/kvm/kvm-s390.c while (start < end) { start 1927 arch/s390/kvm/kvm-s390.c slot = start + (end - start) / 2; start 1932 arch/s390/kvm/kvm-s390.c start = slot + 1; start 1935 arch/s390/kvm/kvm-s390.c if (start >= slots->used_slots) start 1938 arch/s390/kvm/kvm-s390.c if (gfn >= memslots[start].base_gfn && start 1939 arch/s390/kvm/kvm-s390.c gfn < memslots[start].base_gfn + memslots[start].npages) { start 1940 arch/s390/kvm/kvm-s390.c atomic_set(&slots->lru_slot, start); start 1943 arch/s390/kvm/kvm-s390.c return start; start 3150 arch/s390/kvm/kvm-s390.c static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, start 3160 arch/s390/kvm/kvm-s390.c if (start >= 1UL << 31) start 3166 arch/s390/kvm/kvm-s390.c if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { start 3168 arch/s390/kvm/kvm-s390.c start, end); start 274 arch/s390/kvm/kvm-s390.h void 
kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start, start 346 arch/s390/kvm/priv.c unsigned long start, end; start 371 arch/s390/kvm/priv.c start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; start 372 arch/s390/kvm/priv.c start = kvm_s390_logical_to_effective(vcpu, start); start 375 arch/s390/kvm/priv.c end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1); start 377 arch/s390/kvm/priv.c start = kvm_s390_real_to_abs(vcpu, start); start 378 arch/s390/kvm/priv.c end = start + PAGE_SIZE; start 381 arch/s390/kvm/priv.c while (start != end) { start 382 arch/s390/kvm/priv.c unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); start 403 arch/s390/kvm/priv.c start += PAGE_SIZE; start 1011 arch/s390/kvm/priv.c unsigned long start, end; start 1041 arch/s390/kvm/priv.c start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; start 1042 arch/s390/kvm/priv.c start = kvm_s390_logical_to_effective(vcpu, start); start 1045 arch/s390/kvm/priv.c if (kvm_s390_check_low_addr_prot_real(vcpu, start)) start 1052 arch/s390/kvm/priv.c start = kvm_s390_real_to_abs(vcpu, start); start 1053 arch/s390/kvm/priv.c end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); start 1056 arch/s390/kvm/priv.c end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1); start 1064 arch/s390/kvm/priv.c end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1); start 1070 arch/s390/kvm/priv.c while (start != end) { start 1075 arch/s390/kvm/priv.c vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); start 1080 arch/s390/kvm/priv.c if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE)) start 1105 arch/s390/kvm/priv.c start += PAGE_SIZE; start 560 arch/s390/kvm/vsie.c void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start, start 571 arch/s390/kvm/vsie.c if (start >= 1UL << 31) start 589 arch/s390/kvm/vsie.c if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1) start 40 arch/s390/mm/extmem.c unsigned long start; /* last byte type */ start 203 arch/s390/mm/extmem.c seg->vm_segtype = qout->range[0].start & 0xff; start 209 arch/s390/mm/extmem.c unsigned long start = qout->segstart >> PAGE_SHIFT; start 211 arch/s390/mm/extmem.c if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && start 212 arch/s390/mm/extmem.c ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { start 216 arch/s390/mm/extmem.c if (start != qout->range[i].start >> PAGE_SHIFT) { start 220 arch/s390/mm/extmem.c start = (qout->range[i].end >> PAGE_SHIFT) + 1; start 327 arch/s390/mm/extmem.c seg->res->start = seg->start_addr; start 585 arch/s390/mm/extmem.c seg->range[i].start >> PAGE_SHIFT, start 587 arch/s390/mm/extmem.c segtype_string[seg->range[i].start & 0xff]); start 760 arch/s390/mm/gmap.c static void gmap_call_notifier(struct gmap *gmap, unsigned long start, start 766 arch/s390/mm/gmap.c nb->notifier_call(gmap, start, end); start 2177 arch/s390/mm/gmap.c unsigned long start, end, bits, raddr; start 2187 arch/s390/mm/gmap.c start = sg->orig_asce & _ASCE_ORIGIN; start 2188 arch/s390/mm/gmap.c end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE; start 2189 arch/s390/mm/gmap.c if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start && start 2512 arch/s390/mm/gmap.c static int __zap_zero_pages(pmd_t *pmd, unsigned long start, start 2517 arch/s390/mm/gmap.c for (addr = start; addr != end; addr += PAGE_SIZE) { start 2573 arch/s390/mm/gmap.c unsigned long start, end; start 2586 arch/s390/mm/gmap.c start = pmd_val(*pmd) & HPAGE_MASK; start 2587 arch/s390/mm/gmap.c end = start + HPAGE_SIZE - 1; start 2588 arch/s390/mm/gmap.c 
__storage_key_init_range(start, end); start 223 arch/s390/mm/init.c unsigned long start; start 230 arch/s390/mm/init.c unsigned long start, end; start 233 arch/s390/mm/init.c start = cma_get_base(cma); start 234 arch/s390/mm/init.c end = start + cma_get_size(cma); start 235 arch/s390/mm/init.c if (end < mem_data->start) start 237 arch/s390/mm/init.c if (start >= mem_data->end) start 250 arch/s390/mm/init.c mem_data.start = arg->start_pfn << PAGE_SHIFT; start 251 arch/s390/mm/init.c mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT); start 269 arch/s390/mm/init.c int arch_add_memory(int nid, u64 start, u64 size, start 272 arch/s390/mm/init.c unsigned long start_pfn = PFN_DOWN(start); start 279 arch/s390/mm/init.c rc = vmem_add_mapping(start, size); start 285 arch/s390/mm/init.c vmem_remove_mapping(start, size); start 289 arch/s390/mm/init.c void arch_remove_memory(int nid, u64 start, u64 size, start 292 arch/s390/mm/init.c unsigned long start_pfn = start >> PAGE_SHIFT; start 296 arch/s390/mm/init.c vmem_remove_mapping(start, size); start 188 arch/s390/mm/page-states.c unsigned long start, end, ix; start 197 arch/s390/mm/page-states.c start = memblock_region_memory_base_pfn(reg); start 199 arch/s390/mm/page-states.c page = pfn_to_page(start); start 200 arch/s390/mm/page-states.c for (ix = start; ix < end; ix++, page++) { start 22 arch/s390/mm/pageattr.c void __storage_key_init_range(unsigned long start, unsigned long end) start 26 arch/s390/mm/pageattr.c while (start < end) { start 30 arch/s390/mm/pageattr.c boundary = (start + size) & ~(size - 1); start 33 arch/s390/mm/pageattr.c start = sske_frame(start, PAGE_DEFAULT_KEY); start 34 arch/s390/mm/pageattr.c } while (start < boundary); start 38 arch/s390/mm/pageattr.c page_set_storage_key(start, PAGE_DEFAULT_KEY, 1); start 39 arch/s390/mm/pageattr.c start += PAGE_SIZE; start 26 arch/s390/mm/vmem.c unsigned long start; start 69 arch/s390/mm/vmem.c static int vmem_add_mem(unsigned long start, unsigned long size) start 73 arch/s390/mm/vmem.c unsigned long end = start + size; start 74 arch/s390/mm/vmem.c unsigned long address = start; start 154 arch/s390/mm/vmem.c static void vmem_remove_range(unsigned long start, unsigned long size) start 157 arch/s390/mm/vmem.c unsigned long end = start + size; start 158 arch/s390/mm/vmem.c unsigned long address = start; start 204 arch/s390/mm/vmem.c flush_tlb_kernel_range(start, end); start 213 arch/s390/mm/vmem.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 217 arch/s390/mm/vmem.c unsigned long address = start; start 231 arch/s390/mm/vmem.c for (address = start; address < end;) { start 299 arch/s390/mm/vmem.c void vmemmap_free(unsigned long start, unsigned long end, start 312 arch/s390/mm/vmem.c if (seg->start + seg->size > VMEM_MAX_PHYS || start 313 arch/s390/mm/vmem.c seg->start + seg->size < seg->start) start 317 arch/s390/mm/vmem.c if (seg->start >= tmp->start + tmp->size) start 319 arch/s390/mm/vmem.c if (seg->start + seg->size <= tmp->start) start 338 arch/s390/mm/vmem.c vmem_remove_range(seg->start, seg->size); start 341 arch/s390/mm/vmem.c int vmem_remove_mapping(unsigned long start, unsigned long size) start 350 arch/s390/mm/vmem.c if (seg->start == start && seg->size == size) start 354 arch/s390/mm/vmem.c if (seg->start != start || seg->size != size) start 365 arch/s390/mm/vmem.c int vmem_add_mapping(unsigned long start, unsigned long size) start 375 arch/s390/mm/vmem.c seg->start = start; start 382 arch/s390/mm/vmem.c ret = vmem_add_mem(start, 
size); start 440 arch/s390/mm/vmem.c seg->start = reg->base; start 354 arch/s390/net/bpf_jit_comp.c static int get_start(struct bpf_jit *jit, int start) start 358 arch/s390/net/bpf_jit_comp.c for (i = start; i <= 15; i++) { start 368 arch/s390/net/bpf_jit_comp.c static int get_end(struct bpf_jit *jit, int start) start 372 arch/s390/net/bpf_jit_comp.c for (i = start; i < 15; i++) { start 58 arch/s390/numa/toptree.h #define toptree_for_each_sibling(ptree, start) \ start 59 arch/s390/numa/toptree.h toptree_for_each(ptree, start->parent, start->level) start 407 arch/s390/pci/pci.c .start = 0, start 425 arch/s390/pci/pci.c pdev->resource[i].start = start 428 arch/s390/pci/pci.c pdev->resource[i].start = (resource_size_t __force) start 430 arch/s390/pci/pci.c pdev->resource[i].end = pdev->resource[i].start + len - 1; start 459 arch/s390/pci/pci.c pdev->resource[i].start); start 486 arch/s390/pci/pci.c static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start, start 495 arch/s390/pci/pci.c r->start = start; start 496 arch/s390/pci/pci.c r->end = r->start + size - 1; start 261 arch/s390/pci/pci_dma.c unsigned long start, int size) start 269 arch/s390/pci/pci_dma.c start, size, zdev->start_dma >> PAGE_SHIFT, start 488 arch/s390/pci/pci_dma.c struct scatterlist *s = sg, *start = sg, *dma = sg; start 502 arch/s390/pci/pci_dma.c if (__s390_dma_map_sg(dev, start, size, start 510 arch/s390/pci/pci_dma.c start = s; start 516 arch/s390/pci/pci_dma.c if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir)) start 25 arch/s390/purgatory/purgatory.c sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); start 53 arch/sh/boards/board-apsh4a3a.c .start = 0x00000000, start 77 arch/sh/boards/board-apsh4a3a.c .start = 0xA4000000, start 83 arch/sh/boards/board-apsh4a3a.c .start = evt2irq(0x200), start 29 arch/sh/boards/board-apsh4ad0a.c .start = 0xA4000000, start 35 arch/sh/boards/board-apsh4ad0a.c .start = evt2irq(0x200), start 39 arch/sh/boards/board-edosk7705.c .start = SMC_IOADDR, start 44 arch/sh/boards/board-edosk7705.c .start = ETHERNET_IRQ, start 62 arch/sh/boards/board-edosk7760.c .start = 0x00000000, start 84 arch/sh/boards/board-edosk7760.c .start = SH7760_I2C1_MMIO, start 88 arch/sh/boards/board-edosk7760.c .start = evt2irq(0x9e0), start 107 arch/sh/boards/board-edosk7760.c .start = SH7760_I2C0_MMIO, start 111 arch/sh/boards/board-edosk7760.c .start = evt2irq(0x9c0), start 134 arch/sh/boards/board-edosk7760.c .start = SMC_IOADDR, start 139 arch/sh/boards/board-edosk7760.c .start = evt2irq(0x2a0), start 45 arch/sh/boards/board-espt.c .start = 0, start 63 arch/sh/boards/board-espt.c .start = 0xFEE00800, /* use eth1 */ start 67 arch/sh/boards/board-espt.c .start = 0xFEE01800, /* TSU */ start 72 arch/sh/boards/board-espt.c .start = evt2irq(0x920), /* irq number */ start 249 arch/sh/boards/board-magicpanelr2.c .start = 0xa8000000, start 254 arch/sh/boards/board-magicpanelr2.c .start = evt2irq(0x660), start 279 arch/sh/boards/board-magicpanelr2.c .start = PA_LED, start 328 arch/sh/boards/board-magicpanelr2.c .start = 0x00000000, start 37 arch/sh/boards/board-polaris.c .start = PA_EXT5, start 43 arch/sh/boards/board-polaris.c .start = IRQ0_IRQ, start 74 arch/sh/boards/board-polaris.c .start = PORT_PCDR, start 35 arch/sh/boards/board-sh2007.c .start = SMC0_BASE, start 40 arch/sh/boards/board-sh2007.c .start = evt2irq(0x240), start 48 arch/sh/boards/board-sh2007.c .start = SMC1_BASE, start 53 arch/sh/boards/board-sh2007.c .start = evt2irq(0x280), start 81 
arch/sh/boards/board-sh2007.c .start = CF_BASE + CF_OFFSET, start 86 arch/sh/boards/board-sh2007.c .start = CF_BASE + CF_OFFSET + 0x206, start 91 arch/sh/boards/board-sh2007.c .start = evt2irq(0x2c0), start 27 arch/sh/boards/board-sh7757lcr.c .start = 0xffec005c, /* PUDR */ start 64 arch/sh/boards/board-sh7757lcr.c .start = 0xfef00000, start 68 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0xc80), start 91 arch/sh/boards/board-sh7757lcr.c .start = 0xfef00800, start 95 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0xc80), start 129 arch/sh/boards/board-sh7757lcr.c .start = 0xfee00000, start 134 arch/sh/boards/board-sh7757lcr.c .start = 0xfee01800, start 138 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0x2960), start 162 arch/sh/boards/board-sh7757lcr.c .start = 0xfee00800, start 167 arch/sh/boards/board-sh7757lcr.c .start = 0xfee01800, start 171 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0x2980), start 205 arch/sh/boards/board-sh7757lcr.c .start = 0xffcb0000, start 210 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0x1c60), start 214 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0x1c80), start 247 arch/sh/boards/board-sh7757lcr.c .start = 0xffe50000, start 252 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0x480), start 283 arch/sh/boards/board-sh7757lcr.c .start = 0xfe450000, start 288 arch/sh/boards/board-sh7757lcr.c .start = evt2irq(0x840), start 37 arch/sh/boards/board-sh7785lcr.c .start = PLD_LEDCR, start 80 arch/sh/boards/board-sh7785lcr.c .start = NOR_FLASH_ADDR, start 102 arch/sh/boards/board-sh7785lcr.c .start = R8A66597_ADDR, start 107 arch/sh/boards/board-sh7785lcr.c .start = evt2irq(0x240), start 127 arch/sh/boards/board-sh7785lcr.c .start = SM107_MEM_ADDR, start 132 arch/sh/boards/board-sh7785lcr.c .start = SM107_REG_ADDR, start 137 arch/sh/boards/board-sh7785lcr.c .start = evt2irq(0x340), start 220 arch/sh/boards/board-sh7785lcr.c .start = PCA9564_PROTO_32BIT_ADDR, start 225 arch/sh/boards/board-sh7785lcr.c .start = evt2irq(0x380), start 233 arch/sh/boards/board-sh7785lcr.c .start = PCA9564_ADDR, start 238 arch/sh/boards/board-sh7785lcr.c .start = evt2irq(0x380), start 54 arch/sh/boards/board-urquell.c .start = BOARDREG(SLEDR), start 74 arch/sh/boards/board-urquell.c .start = 0x05800300, start 79 arch/sh/boards/board-urquell.c .start = evt2irq(0x360), start 127 arch/sh/boards/board-urquell.c .start = NOR_FLASH_ADDR, start 59 arch/sh/boards/mach-ap325rxa/setup.c .start = 0xb6080000, start 64 arch/sh/boards/mach-ap325rxa/setup.c .start = evt2irq(0x660), start 119 arch/sh/boards/mach-ap325rxa/setup.c .start = 0x00000000, start 144 arch/sh/boards/mach-ap325rxa/setup.c .start = 0xa4530000, start 242 arch/sh/boards/mach-ap325rxa/setup.c .start = 0xfe940000, /* P4-only space */ start 247 arch/sh/boards/mach-ap325rxa/setup.c .start = evt2irq(0x580), start 285 arch/sh/boards/mach-ap325rxa/setup.c .start = 0xfe910000, start 290 arch/sh/boards/mach-ap325rxa/setup.c .start = evt2irq(0x880), start 317 arch/sh/boards/mach-ap325rxa/setup.c .start = 0x04ce0000, start 322 arch/sh/boards/mach-ap325rxa/setup.c .start = evt2irq(0xe80), start 344 arch/sh/boards/mach-ap325rxa/setup.c .start = 0x04cf0000, start 349 arch/sh/boards/mach-ap325rxa/setup.c .start = evt2irq(0x4e0), start 97 arch/sh/boards/mach-ecovec24/setup.c .start = 0xA405012C, /* PTG */ start 135 arch/sh/boards/mach-ecovec24/setup.c .start = 0x00000000, start 154 arch/sh/boards/mach-ecovec24/setup.c .start = SH_ETH_ADDR, start 159 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xd60), start 193 
arch/sh/boards/mach-ecovec24/setup.c .start = 0xa4d80000, start 198 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xa20), start 229 arch/sh/boards/mach-ecovec24/setup.c .start = 0xa4d90000, start 234 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xa40), start 286 arch/sh/boards/mach-ecovec24/setup.c .start = 0xa4d90000, start 291 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xa40), start 355 arch/sh/boards/mach-ecovec24/setup.c .start = 0xfe940000, start 360 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xf40), start 412 arch/sh/boards/mach-ecovec24/setup.c .start = 0xfe910000, start 417 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x880), start 449 arch/sh/boards/mach-ecovec24/setup.c .start = 0xfe914000, start 454 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x9e0), start 556 arch/sh/boards/mach-ecovec24/setup.c .start = 0x044b0000, start 561 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xbe0), start 713 arch/sh/boards/mach-ecovec24/setup.c .start = 0x04ce0000, start 718 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xe80), start 754 arch/sh/boards/mach-ecovec24/setup.c .start = 0x04cf0000, start 759 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x4e0), start 820 arch/sh/boards/mach-ecovec24/setup.c .start = 0xa4c40000, start 825 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xc80), start 854 arch/sh/boards/mach-ecovec24/setup.c .start = 0xFE3C0000, start 859 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0xf80), start 899 arch/sh/boards/mach-ecovec24/setup.c .start = 0xA45D0000, start 904 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x480), start 936 arch/sh/boards/mach-ecovec24/setup.c .start = 0xfe960000, start 941 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x8e0), start 961 arch/sh/boards/mach-ecovec24/setup.c .start = 0xA4CA0000, start 967 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x5a0), start 972 arch/sh/boards/mach-ecovec24/setup.c .start = evt2irq(0x5c0), start 52 arch/sh/boards/mach-highlander/psw.c .start = IRQ_PSW, start 40 arch/sh/boards/mach-highlander/setup.c .start = 0xA4200000, start 45 arch/sh/boards/mach-highlander/setup.c .start = IRQ_EXT1, /* irq number */ start 71 arch/sh/boards/mach-highlander/setup.c .start = 0xb0000000, start 77 arch/sh/boards/mach-highlander/setup.c .start = IRQ_EXT4, /* irq number */ start 97 arch/sh/boards/mach-highlander/setup.c .start = PA_AREA5_IO + 0x1000, start 102 arch/sh/boards/mach-highlander/setup.c .start = PA_AREA5_IO + 0x80c, start 107 arch/sh/boards/mach-highlander/setup.c .start = IRQ_CF, start 128 arch/sh/boards/mach-highlander/setup.c .start = PA_OBLED, start 167 arch/sh/boards/mach-highlander/setup.c .start = 0xa5800400, start 170 arch/sh/boards/mach-highlander/setup.c .start = 0xa4100400, start 176 arch/sh/boards/mach-highlander/setup.c .start = IRQ_AX88796, start 226 arch/sh/boards/mach-highlander/setup.c .start = PA_NORFLASH_ADDR, start 243 arch/sh/boards/mach-highlander/setup.c .start = PA_SMCR, start 248 arch/sh/boards/mach-highlander/setup.c .start = IRQ_SMBUS, start 27 arch/sh/boards/mach-hp6xx/setup.c .start = 0x15000000 + 0x1f0, start 32 arch/sh/boards/mach-hp6xx/setup.c .start = 0x15000000 + 0x1fe, start 37 arch/sh/boards/mach-hp6xx/setup.c .start = evt2irq(0xba0), start 94 arch/sh/boards/mach-hp6xx/setup.c .start = dac_audio_start, start 75 arch/sh/boards/mach-kfr2r09/setup.c .start = 0x00000000, start 93 arch/sh/boards/mach-kfr2r09/setup.c .start = 0x10000000, start 122 
arch/sh/boards/mach-kfr2r09/setup.c .start = 0x044b0000, start 127 arch/sh/boards/mach-kfr2r09/setup.c .start = evt2irq(0xbe0), start 185 arch/sh/boards/mach-kfr2r09/setup.c .start = 0xfe940000, /* P4-only space */ start 190 arch/sh/boards/mach-kfr2r09/setup.c .start = evt2irq(0xf40), start 221 arch/sh/boards/mach-kfr2r09/setup.c .start = 0x04d80000, start 226 arch/sh/boards/mach-kfr2r09/setup.c .start = evt2irq(0xa20), start 260 arch/sh/boards/mach-kfr2r09/setup.c .start = 0xfe910000, start 265 arch/sh/boards/mach-kfr2r09/setup.c .start = evt2irq(0x880), start 311 arch/sh/boards/mach-kfr2r09/setup.c .start = 0x04ce0000, start 316 arch/sh/boards/mach-kfr2r09/setup.c .start = evt2irq(0xe80), start 49 arch/sh/boards/mach-landisk/psw.c .start = IRQ_POWER, start 56 arch/sh/boards/mach-landisk/psw.c .start = IRQ_BUTTON, start 68 arch/sh/boards/mach-landisk/setup.c cf_ide_resources[0].start = (unsigned long)cf_ide_base + 0x40; start 71 arch/sh/boards/mach-landisk/setup.c cf_ide_resources[1].start = (unsigned long)cf_ide_base + 0x2c; start 74 arch/sh/boards/mach-landisk/setup.c cf_ide_resources[2].start = IRQ_FATA; start 20 arch/sh/boards/mach-lboxre2/setup.c .start = 0x1f0, start 25 arch/sh/boards/mach-lboxre2/setup.c .start = 0x1f0 + 0x206, start 30 arch/sh/boards/mach-lboxre2/setup.c .start = IRQ_CF0, start 62 arch/sh/boards/mach-lboxre2/setup.c cf_ide_resources[0].start += cf0_io_base ; start 64 arch/sh/boards/mach-lboxre2/setup.c cf_ide_resources[1].start += cf0_io_base ; start 22 arch/sh/boards/mach-microdev/setup.c .start = 0x300, start 27 arch/sh/boards/mach-microdev/setup.c .start = MICRODEV_LINUX_IRQ_ETHERNET, start 157 arch/sh/boards/mach-microdev/setup.c .start = 0x07200000, start 162 arch/sh/boards/mach-microdev/setup.c .start = 0x07000000, start 58 arch/sh/boards/mach-migor/setup.c .start = 0x10000300, start 63 arch/sh/boards/mach-migor/setup.c .start = evt2irq(0x600), /* IRQ0 */ start 92 arch/sh/boards/mach-migor/setup.c .start = 0x044b0000, start 97 arch/sh/boards/mach-migor/setup.c .start = evt2irq(0xbe0), start 141 arch/sh/boards/mach-migor/setup.c .start = 0x00000000, start 204 arch/sh/boards/mach-migor/setup.c .start = 0x18000000, start 286 arch/sh/boards/mach-migor/setup.c .start = 0xfe940000, /* P4-only space */ start 291 arch/sh/boards/mach-migor/setup.c .start = evt2irq(0x580), start 328 arch/sh/boards/mach-migor/setup.c .start = 0xfe910000, start 333 arch/sh/boards/mach-migor/setup.c .start = evt2irq(0x880), start 376 arch/sh/boards/mach-migor/setup.c .start = 0x04ce0000, start 381 arch/sh/boards/mach-migor/setup.c .start = evt2irq(0xe80), start 28 arch/sh/boards/mach-r2d/setup.c .start = PA_AREA5_IO + 0x1000, start 33 arch/sh/boards/mach-r2d/setup.c .start = PA_AREA5_IO + 0x80c, start 39 arch/sh/boards/mach-r2d/setup.c .start = IRQ_CF_IDE, start 80 arch/sh/boards/mach-r2d/setup.c .start = 0xffe00000, start 98 arch/sh/boards/mach-r2d/setup.c .start = PA_OUTPORT, start 113 arch/sh/boards/mach-r2d/setup.c .start = 0x10000000, start 118 arch/sh/boards/mach-r2d/setup.c .start = 0x13e00000, start 123 arch/sh/boards/mach-r2d/setup.c .start = IRQ_VOYAGER, start 213 arch/sh/boards/mach-r2d/setup.c .start = 0x00000000, start 29 arch/sh/boards/mach-rsk/devices-rsk7203.c .start = 0x24000000, start 34 arch/sh/boards/mach-rsk/devices-rsk7203.c .start = 64, start 25 arch/sh/boards/mach-rsk/devices-rsk7264.c .start = 0x28000000, start 30 arch/sh/boards/mach-rsk/devices-rsk7264.c .start = 65, start 27 arch/sh/boards/mach-rsk/devices-rsk7269.c .start = 0x24000000, start 32 
arch/sh/boards/mach-rsk/devices-rsk7269.c .start = 85, start 51 arch/sh/boards/mach-rsk/setup.c .start = 0x20000000, start 22 arch/sh/boards/mach-sdk7780/setup.c .start = PA_LED, start 38 arch/sh/boards/mach-sdk7780/setup.c .start = PA_LAN + 0x300, start 43 arch/sh/boards/mach-sdk7780/setup.c .start = IRQ_ETHERNET, start 28 arch/sh/boards/mach-sdk7786/setup.c .start = 0x07fff8b0, start 49 arch/sh/boards/mach-sdk7786/setup.c .start = 0x07ffff00, start 55 arch/sh/boards/mach-sdk7786/setup.c .start = evt2irq(0x2c0), start 79 arch/sh/boards/mach-sdk7786/setup.c .start = 0x07fff9e0, start 92 arch/sh/boards/mach-sdk7786/setup.c .start = 0x07fffc30, start 22 arch/sh/boards/mach-se/7206/setup.c .start = PA_SMSC + 0x300, start 27 arch/sh/boards/mach-se/7206/setup.c .start = 64, start 57 arch/sh/boards/mach-se/7206/setup.c .start = PA_LED, start 17 arch/sh/boards/mach-se/7343/setup.c .start = PA_LED, start 55 arch/sh/boards/mach-se/7343/setup.c .start = 0x00000000, start 105 arch/sh/boards/mach-se/7343/setup.c .start = 0x11800000, start 110 arch/sh/boards/mach-se/7343/setup.c .start = 0x11800002, start 154 arch/sh/boards/mach-se/7343/setup.c usb_resources[2].start = usb_resources[2].end = start 71 arch/sh/boards/mach-se/770x/setup.c .start = PA_MRSHPC_IO + 0x1f0, start 76 arch/sh/boards/mach-se/770x/setup.c .start = PA_MRSHPC_IO + 0x1f0 + 0x206, start 81 arch/sh/boards/mach-se/770x/setup.c .start = IRQ_CFCARD, start 101 arch/sh/boards/mach-se/770x/setup.c .start = PA_LED, start 126 arch/sh/boards/mach-se/770x/setup.c .start = SH_ETH0_BASE, start 131 arch/sh/boards/mach-se/770x/setup.c .start = SH_TSU_BASE, start 136 arch/sh/boards/mach-se/770x/setup.c .start = SH_ETH0_IRQ, start 154 arch/sh/boards/mach-se/770x/setup.c .start = SH_ETH1_BASE, start 159 arch/sh/boards/mach-se/770x/setup.c .start = SH_TSU_BASE, start 164 arch/sh/boards/mach-se/770x/setup.c .start = SH_ETH1_IRQ, start 25 arch/sh/boards/mach-se/7721/setup.c .start = PA_LED, start 42 arch/sh/boards/mach-se/7721/setup.c .start = PA_MRSHPC_IO + 0x1f0, start 47 arch/sh/boards/mach-se/7721/setup.c .start = PA_MRSHPC_IO + 0x1f0 + 0x206, start 52 arch/sh/boards/mach-se/7721/setup.c .start = MRSHPC_IRQ0, start 28 arch/sh/boards/mach-se/7722/setup.c .start = PA_LED, start 48 arch/sh/boards/mach-se/7722/setup.c .start = PA_LAN + 0x300, start 72 arch/sh/boards/mach-se/7722/setup.c .start = PA_MRSHPC_IO + 0x1f0, start 77 arch/sh/boards/mach-se/7722/setup.c .start = PA_MRSHPC_IO + 0x1f0 + 0x206, start 111 arch/sh/boards/mach-se/7722/setup.c .start = 0x044b0000, start 116 arch/sh/boards/mach-se/7722/setup.c .start = evt2irq(0xbe0), start 143 arch/sh/boards/mach-se/7722/setup.c cf_ide_resources[2].start = cf_ide_resources[2].end = start 146 arch/sh/boards/mach-se/7722/setup.c smc91x_eth_resources[1].start = smc91x_eth_resources[1].end = start 77 arch/sh/boards/mach-se/7724/setup.c .start = PA_LED, start 97 arch/sh/boards/mach-se/7724/setup.c .start = 0x1a300300, start 102 arch/sh/boards/mach-se/7724/setup.c .start = IRQ0_SMC, start 143 arch/sh/boards/mach-se/7724/setup.c .start = 0x00000000, start 205 arch/sh/boards/mach-se/7724/setup.c .start = 0xfe940000, start 210 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xf40), start 232 arch/sh/boards/mach-se/7724/setup.c .start = 0xfe910000, start 237 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0x880), start 260 arch/sh/boards/mach-se/7724/setup.c .start = 0xfe914000, start 265 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0x9e0), start 285 arch/sh/boards/mach-se/7724/setup.c .start = 
0xFE3C0000, start 290 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xf80), start 342 arch/sh/boards/mach-se/7724/setup.c .start = 0x044b0000, start 347 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xbe0), start 365 arch/sh/boards/mach-se/7724/setup.c .start = SH_ETH_ADDR, start 370 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xd60), start 396 arch/sh/boards/mach-se/7724/setup.c .start = 0xa4d80000, start 401 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xa20), start 425 arch/sh/boards/mach-se/7724/setup.c .start = 0xa4d90000, start 430 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xa40), start 460 arch/sh/boards/mach-se/7724/setup.c .start = 0x04ce0000, start 465 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0xe80), start 489 arch/sh/boards/mach-se/7724/setup.c .start = 0x04cf0000, start 494 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0x4e0), start 519 arch/sh/boards/mach-se/7724/setup.c .start = 0xA45D0000, start 524 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0x480), start 557 arch/sh/boards/mach-se/7724/setup.c .start = 0xfe960000, start 562 arch/sh/boards/mach-se/7724/setup.c .start = evt2irq(0x8e0), start 28 arch/sh/boards/mach-se/7751/setup.c .start = PA_LED, start 18 arch/sh/boards/mach-se/7780/setup.c .start = PA_LED, start 34 arch/sh/boards/mach-se/7780/setup.c .start = PA_LAN + 0x300, start 39 arch/sh/boards/mach-se/7780/setup.c .start = SMC_IRQ, start 27 arch/sh/boards/mach-sh03/setup.c .start = 0x1f0, start 32 arch/sh/boards/mach-sh03/setup.c .start = 0x1f0 + 0x206, start 37 arch/sh/boards/mach-sh03/setup.c .start = IRL2_IRQ, start 51 arch/sh/boards/mach-sh03/setup.c .start = 0xa0800000, start 85 arch/sh/boards/mach-sh03/setup.c cf_ide_resources[0].start += (unsigned long)cf_ide_base; start 87 arch/sh/boards/mach-sh03/setup.c cf_ide_resources[1].start += (unsigned long)cf_ide_base; start 49 arch/sh/boards/mach-sh7763rdp/setup.c .start = 0, start 72 arch/sh/boards/mach-sh7763rdp/setup.c .start = 0xFEE00800, /* use eth1 */ start 76 arch/sh/boards/mach-sh7763rdp/setup.c .start = 0xFEE01800, /* TSU */ start 80 arch/sh/boards/mach-sh7763rdp/setup.c .start = evt2irq(0x920), /* irq number */ start 102 arch/sh/boards/mach-sh7763rdp/setup.c .start = 0xFFE80000, start 27 arch/sh/boards/mach-x3proto/setup.c .start = 0xb8140020, start 46 arch/sh/boards/mach-x3proto/setup.c .start = 0x18000300, start 73 arch/sh/boards/mach-x3proto/setup.c .start = 0x18040000, start 103 arch/sh/boards/mach-x3proto/setup.c .start = 0x18080000, start 248 arch/sh/boards/mach-x3proto/setup.c r8a66597_usb_host_resources[1].start = start 251 arch/sh/boards/mach-x3proto/setup.c m66592_usb_peripheral_resources[1].start = start 254 arch/sh/boards/mach-x3proto/setup.c smc91x_resources[1].start = start 99 arch/sh/drivers/heartbeat.c hd->base = ioremap_nocache(res->start, resource_size(res)); start 41 arch/sh/drivers/pci/fixups-dreamcast.c dev->resource[1].start = p->resources[0].start + 0x100; start 42 arch/sh/drivers/pci/fixups-dreamcast.c dev->resource[1].end = dev->resource[1].start + 0x200 - 1; start 59 arch/sh/drivers/pci/fixups-dreamcast.c res.start = GAPSPCI_DMA_BASE; start 64 arch/sh/drivers/pci/fixups-dreamcast.c res.start, start 65 arch/sh/drivers/pci/fixups-dreamcast.c region.start, start 102 arch/sh/drivers/pci/fixups-se7751.c BUG_ON(chan->resources[1].start != SH7751_PCI_MEMORY_BASE); start 104 arch/sh/drivers/pci/fixups-se7751.c PCIC_WRITE(SH7751_PCIMBR, chan->resources[1].start); start 107 arch/sh/drivers/pci/fixups-se7751.c 
PCIC_WRITE(SH7751_PCIIOBR, (chan->resources[0].start & SH7751_PCIIOBR_MASK)); start 28 arch/sh/drivers/pci/pci-dreamcast.c .start = GAPSPCI_BBA_CONFIG, start 33 arch/sh/drivers/pci/pci-dreamcast.c .start = GAPSPCI_DMA_BASE, start 209 arch/sh/drivers/pci/pci-sh5.c sh5_pci_resources[0].start = PCI_IO_AREA; start 212 arch/sh/drivers/pci/pci-sh5.c sh5_pci_resources[1].start = memStart; start 48 arch/sh/drivers/pci/pci-sh7751.c .start = 0x1000, start 53 arch/sh/drivers/pci/pci-sh7751.c .start = SH7751_PCI_MEMORY_BASE, start 129 arch/sh/drivers/pci/pci-sh7751.c word = chan->resources[1].start & SH4_PCIMBR_MASK; start 135 arch/sh/drivers/pci/pci-sh7751.c word = chan->resources[0].start & SH4_PCIIOBR_MASK; start 31 arch/sh/drivers/pci/pci-sh7780.c .start = 0x1000, start 36 arch/sh/drivers/pci/pci-sh7780.c .start = 0xfd000000, start 41 arch/sh/drivers/pci/pci-sh7780.c .start = 0x10000000, start 49 arch/sh/drivers/pci/pci-sh7780.c .start = 0xc0000000, start 370 arch/sh/drivers/pci/pci-sh7780.c __raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1)); start 177 arch/sh/drivers/pci/pci.c resource_size_t start = res->start; start 180 arch/sh/drivers/pci/pci.c if (start < PCIBIOS_MIN_IO + hose->resources[0].start) start 181 arch/sh/drivers/pci/pci.c start = PCIBIOS_MIN_IO + hose->resources[0].start; start 186 arch/sh/drivers/pci/pci.c if (start & 0x300) start 187 arch/sh/drivers/pci/pci.c start = (start + 0x3ff) & ~0x3ff; start 190 arch/sh/drivers/pci/pci.c return start; start 43 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xfd000000, start 48 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xc0000000, start 53 arch/sh/drivers/pci/pcie-sh7786.c .start = 0x10000000, start 58 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xfe100000, start 67 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xfd800000, start 72 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xa0000000, start 77 arch/sh/drivers/pci/pcie-sh7786.c .start = 0x30000000, start 82 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xfe300000, start 91 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xfc800000, start 96 arch/sh/drivers/pci/pcie-sh7786.c .start = 0x80000000, start 101 arch/sh/drivers/pci/pcie-sh7786.c .start = 0x20000000, start 106 arch/sh/drivers/pci/pcie-sh7786.c .start = 0xfcd00000, start 114 arch/sh/drivers/pci/pcie-sh7786.c #define DEFINE_CONTROLLER(start, idx) \ start 119 arch/sh/drivers/pci/pcie-sh7786.c .reg_base = start, \ start 143 arch/sh/drivers/pci/pcie-sh7786.c dev->resource[i].start = 0; start 466 arch/sh/drivers/pci/pcie-sh7786.c pci_write_reg(chan, upper_32_bits(res->start), start 468 arch/sh/drivers/pci/pcie-sh7786.c pci_write_reg(chan, lower_32_bits(res->start), start 599 arch/sh/drivers/pci/pcie-sh7786.c port->hose->io_map_base = port->hose->resources[0].start; start 27 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_EMI_CBLOCK, start 32 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_EMI_DBLOCK, start 46 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_FEMI_CBLOCK, start 51 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_FEMI_DBLOCK, start 65 arch/sh/drivers/superhyway/ops-sh4-202.c .start = P4SEGADDR(0x1e7ffff8), start 70 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_EPBR_BLOCK, start 83 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_DMAC_BLOCK, start 96 arch/sh/drivers/superhyway/ops-sh4-202.c .start = P4SEGADDR(0x1ffffff8), start 101 arch/sh/drivers/superhyway/ops-sh4-202.c .start = PHYS_PBR_BLOCK, start 35 arch/sh/include/asm/cacheflush.h extern void (*__flush_wback_region)(void *start, 
int size); start 36 arch/sh/include/asm/cacheflush.h extern void (*__flush_purge_region)(void *start, int size); start 37 arch/sh/include/asm/cacheflush.h extern void (*__flush_invalidate_region)(void *start, int size); start 45 arch/sh/include/asm/cacheflush.h unsigned long start, unsigned long end); start 48 arch/sh/include/asm/cacheflush.h extern void flush_icache_range(unsigned long start, unsigned long end); start 90 arch/sh/include/asm/cacheflush.h #define flush_cache_vmap(start, end) local_flush_cache_all(NULL) start 91 arch/sh/include/asm/cacheflush.h #define flush_cache_vunmap(start, end) local_flush_cache_all(NULL) start 30 arch/sh/include/asm/mmzone.h void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end); start 33 arch/sh/include/asm/mmzone.h setup_bootmem_node(int nid, unsigned long start, unsigned long end) start 144 arch/sh/include/asm/pgtable.h extern void page_table_range_init(unsigned long start, unsigned long end, start 17 arch/sh/include/asm/tlbflush.h unsigned long start, start 21 arch/sh/include/asm/tlbflush.h extern void local_flush_tlb_kernel_range(unsigned long start, start 31 arch/sh/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 34 arch/sh/include/asm/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 44 arch/sh/include/asm/tlbflush.h #define flush_tlb_range(vma, start, end) \ start 45 arch/sh/include/asm/tlbflush.h local_flush_tlb_range(vma, start, end) start 47 arch/sh/include/asm/tlbflush.h #define flush_tlb_kernel_range(start, end) \ start 48 arch/sh/include/asm/tlbflush.h local_flush_tlb_kernel_range(start, end) start 31 arch/sh/include/cpu-sh4/cpu/sq.h void sq_flush_range(unsigned long start, unsigned int len); start 147 arch/sh/kernel/cpu/proc.c .start = c_start, start 127 arch/sh/kernel/cpu/sh2/setup-sh7619.c .start = 0xfb000000, start 132 arch/sh/kernel/cpu/sh2/setup-sh7619.c .start = 85, start 16 arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c .start = 0xfffe3800, start 16 arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c .start = 0xfffe3800, start 17 arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c .start = 0xfffe3800, start 339 arch/sh/kernel/cpu/sh2a/setup-sh7201.c .start = 0xffff0800, start 345 arch/sh/kernel/cpu/sh2a/setup-sh7201.c .start = 152, start 292 arch/sh/kernel/cpu/sh2a/setup-sh7203.c .start = 0xffff2000, start 298 arch/sh/kernel/cpu/sh2a/setup-sh7203.c .start = 231, start 452 arch/sh/kernel/cpu/sh2a/setup-sh7264.c .start = 0xfffe6000, start 458 arch/sh/kernel/cpu/sh2a/setup-sh7264.c .start = 296, start 484 arch/sh/kernel/cpu/sh2a/setup-sh7264.c .start = 0xffffc000, start 489 arch/sh/kernel/cpu/sh2a/setup-sh7264.c .start = 170, start 474 arch/sh/kernel/cpu/sh2a/setup-sh7269.c .start = 0xfffe6000, start 480 arch/sh/kernel/cpu/sh2a/setup-sh7269.c .start = 338, start 500 arch/sh/kernel/cpu/sh2a/setup-sh7269.c .start = 0xe8010000, start 505 arch/sh/kernel/cpu/sh2a/setup-sh7269.c .start = 170, start 16 arch/sh/kernel/cpu/sh3/pinmux-sh7720.c .start = 0xa4050100, start 114 arch/sh/kernel/cpu/sh3/setup-sh7705.c .start = 0xfffffec0, start 119 arch/sh/kernel/cpu/sh3/setup-sh7705.c .start = evt2irq(0x480), start 91 arch/sh/kernel/cpu/sh3/setup-sh770x.c .start = 0xfffffec0, start 96 arch/sh/kernel/cpu/sh3/setup-sh770x.c .start = evt2irq(0x480), start 73 arch/sh/kernel/cpu/sh3/setup-sh7710.c .start = 0xa413fec0, start 78 arch/sh/kernel/cpu/sh3/setup-sh7710.c .start = evt2irq(0x480), start 26 arch/sh/kernel/cpu/sh3/setup-sh7720.c .start = 0xa413fec0, start 
32 arch/sh/kernel/cpu/sh3/setup-sh7720.c .start = evt2irq(0x480), start 95 arch/sh/kernel/cpu/sh3/setup-sh7720.c .start = 0xA4428000, start 100 arch/sh/kernel/cpu/sh3/setup-sh7720.c .start = evt2irq(0xa60), start 125 arch/sh/kernel/cpu/sh3/setup-sh7720.c .start = 0xA4420000, start 131 arch/sh/kernel/cpu/sh3/setup-sh7720.c .start = evt2irq(0xa20), start 19 arch/sh/kernel/cpu/sh4/setup-sh7750.c .start = 0xffc80000, start 25 arch/sh/kernel/cpu/sh4/setup-sh7750.c .start = evt2irq(0x480), start 57 arch/sh/kernel/cpu/sh4/sq.c void sq_flush_range(unsigned long start, unsigned int len) start 59 arch/sh/kernel/cpu/sh4/sq.c unsigned long *sq = (unsigned long *)start; start 10 arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c .start = 0xa4050100, start 16 arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c .start = 0xa4050100, start 21 arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c .start = 0xa4050100, start 16 arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c .start = 0xFFFC0000, start 21 arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c .start = 0xFFC40000, start 21 arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c .start = 0xffec0000, start 16 arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c .start = 0xffe70000, start 21 arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c .start = 0xffcc0000, start 15 arch/sh/kernel/cpu/sh4a/pinmux-shx3.c .start = 0xffc70000, start 100 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = 0x04470000, start 105 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = evt2irq(0xe00), start 121 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = 0x04750000, start 126 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = evt2irq(0x780), start 148 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = 0xfe900000, start 176 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = 0xfe920000, start 204 arch/sh/kernel/cpu/sh4a/setup-sh7343.c .start = 0xfea00000, start 42 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = 0x04470000, start 47 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = evt2irq(0xe00), start 66 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = 0xa4d80000, start 71 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = evt2irq(0xa20), start 98 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = 0xfe900000, start 126 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = 0xfe920000, start 154 arch/sh/kernel/cpu/sh4a/setup-sh7366.c .start = 0xfe924000, start 137 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xfe008020, start 143 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xfe009000, start 149 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0xbc0), start 155 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0x800), start 161 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0xb80), start 246 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xa465fec0, start 252 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0x7a0), start 257 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0x7c0), start 262 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0x780), start 281 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0x04480000, start 286 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0xa20), start 307 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0x04470000, start 312 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0xe00), start 334 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xfe900000, start 362 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xfe920000, start 390 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xfea00000, start 458 arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = 0xa4540000, start 463 
arch/sh/kernel/cpu/sh4a/setup-sh7722.c .start = evt2irq(0xf80), start 154 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = 0xfe900000, start 182 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = 0xfe920000, start 210 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = 0xfe924000, start 292 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = 0xa465fec0, start 298 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = evt2irq(0xaa0), start 303 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = evt2irq(0xac0), start 308 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = evt2irq(0xa80), start 326 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = 0xa4d80000, start 331 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = evt2irq(0xa20), start 352 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = 0x04470000, start 357 arch/sh/kernel/cpu/sh4a/setup-sh7723.c .start = evt2irq(0xe00), start 204 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe008020, start 210 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe009000, start 216 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xbc0), start 222 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0x800), start 228 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xb80), start 238 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfdc08020, start 244 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfdc09000, start 250 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xb40), start 256 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0x700), start 262 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xb00), start 415 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xa465fec0, start 421 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xaa0), start 426 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xac0), start 431 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xa80), start 447 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0x04470000, start 452 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xe00), start 469 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0x04750000, start 474 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = evt2irq(0xd80), start 497 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe900000, start 526 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe920000, start 555 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe924000, start 584 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe930000, start 613 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe940000, start 703 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xfe980000, start 732 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xFE200000, start 761 arch/sh/kernel/cpu/sh4a/setup-sh7724.c .start = 0xFE300000, start 154 arch/sh/kernel/cpu/sh4a/setup-sh7734.c .start = 0xFFFC5000, start 159 arch/sh/kernel/cpu/sh4a/setup-sh7734.c .start = evt2irq(0xC00), start 175 arch/sh/kernel/cpu/sh4a/setup-sh7734.c .start = 0xFFC70000, start 180 arch/sh/kernel/cpu/sh4a/setup-sh7734.c .start = evt2irq(0x860), start 105 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xfe002000, start 110 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xcc0), start 442 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff608020, start 448 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff609000, start 454 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x640), start 464 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff618020, start 470 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff619000, start 476 arch/sh/kernel/cpu/sh4a/setup-sh7757.c 
.start = evt2irq(0x640), start 482 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x7c0), start 488 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x7c0), start 494 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xd00), start 500 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xd00), start 506 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xd00), start 512 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xd00), start 518 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xd00), start 524 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0xd00), start 534 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff708020, start 540 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff709000, start 546 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x2a60), start 552 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x2400), start 558 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x24e0), start 568 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff718020, start 574 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xff719000, start 580 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x2a80), start 586 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x2500), start 592 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x2600), start 651 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xffd8ee70, start 656 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x8c0), start 670 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xfe480000, start 675 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x1d80), start 689 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xfe4f1000, start 694 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x920), start 713 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = 0xfe4f1800, start 718 arch/sh/kernel/cpu/sh4a/setup-sh7757.c .start = evt2irq(0x920), start 83 arch/sh/kernel/cpu/sh4a/setup-sh7763.c .start = 0xffe80000, start 89 arch/sh/kernel/cpu/sh4a/setup-sh7763.c .start = evt2irq(0x480), start 103 arch/sh/kernel/cpu/sh4a/setup-sh7763.c .start = 0xffec8000, start 108 arch/sh/kernel/cpu/sh4a/setup-sh7763.c .start = evt2irq(0xc60), start 132 arch/sh/kernel/cpu/sh4a/setup-sh7763.c .start = 0xffec0000, start 137 arch/sh/kernel/cpu/sh4a/setup-sh7763.c .start = evt2irq(0xc80), start 103 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = 0xffe80000, start 109 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = evt2irq(0x480), start 195 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = 0xfc808020, start 201 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = 0xfc809000, start 211 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = evt2irq(0x640), start 220 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = 0xfc818020, start 231 arch/sh/kernel/cpu/sh4a/setup-sh7780.c .start = evt2irq(0x7c0), start 261 arch/sh/kernel/cpu/sh4a/setup-sh7785.c .start = 0xfc808020, start 267 arch/sh/kernel/cpu/sh4a/setup-sh7785.c .start = 0xfc809000, start 277 arch/sh/kernel/cpu/sh4a/setup-sh7785.c .start = evt2irq(0x620), start 286 arch/sh/kernel/cpu/sh4a/setup-sh7785.c .start = 0xfcc08020, start 297 arch/sh/kernel/cpu/sh4a/setup-sh7785.c .start = evt2irq(0x880), start 298 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = 0xfe008020, start 303 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = 0xfe009000, start 308 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = evt2irq(0x5c0), start 313 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = evt2irq(0x500), start 334 arch/sh/kernel/cpu/sh4a/setup-sh7786.c 
.start = USB_EHCI_START, start 339 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = evt2irq(0xba0), start 358 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = USB_OHCI_START, start 363 arch/sh/kernel/cpu/sh4a/setup-sh7786.c .start = evt2irq(0xba0), start 813 arch/sh/kernel/cpu/sh4a/setup-sh7786.c scif1_demux_resources[1].start = start 815 arch/sh/kernel/cpu/sh4a/setup-sh7786.c scif1_demux_resources[2].start = start 817 arch/sh/kernel/cpu/sh4a/setup-sh7786.c scif1_demux_resources[3].start = irq; start 818 arch/sh/kernel/cpu/sh4a/setup-sh7786.c scif1_demux_resources[4].start = start 41 arch/sh/kernel/cpu/sh5/setup-sh5.c .start = PHYS_PERIPHERAL_BLOCK + 0x01040000, start 47 arch/sh/kernel/cpu/sh5/setup-sh5.c .start = IRQ_PRI, start 52 arch/sh/kernel/cpu/sh5/setup-sh5.c .start = IRQ_CUI, start 57 arch/sh/kernel/cpu/sh5/setup-sh5.c .start = IRQ_ATI, start 866 arch/sh/kernel/dwarf.c void *start, unsigned long len, start 875 arch/sh/kernel/dwarf.c void *p = start; start 924 arch/sh/kernel/dwarf.c unsigned long start, end; start 928 arch/sh/kernel/dwarf.c start = fde->initial_location; start 936 arch/sh/kernel/dwarf.c if (start < tmp_start) start 938 arch/sh/kernel/dwarf.c else if (start >= tmp_end) start 1093 arch/sh/kernel/dwarf.c unsigned long start, end; start 1096 arch/sh/kernel/dwarf.c start = end = 0; start 1102 arch/sh/kernel/dwarf.c start = sechdrs[i].sh_addr; start 1103 arch/sh/kernel/dwarf.c end = start + sechdrs[i].sh_size; start 1112 arch/sh/kernel/dwarf.c err = dwarf_parse_section((char *)start, (char *)end, me); start 83 arch/sh/kernel/io_trapped.c (unsigned long)res->start); start 122 arch/sh/kernel/io_trapped.c if (res->start == offset) { start 174 arch/sh/kernel/io_trapped.c return res->start + (address - vaddr); start 64 arch/sh/kernel/machine_kexec.c printk(" start : 0x%08x\n\n", (unsigned int)image->start); start 121 arch/sh/kernel/machine_kexec.c (unsigned long)phys_to_virt(image->start)); start 161 arch/sh/kernel/machine_kexec.c crashk_res.start = crash_base; start 165 arch/sh/kernel/machine_kexec.c if (crashk_res.end == crashk_res.start) start 169 arch/sh/kernel/machine_kexec.c if (!crashk_res.start) { start 171 arch/sh/kernel/machine_kexec.c crashk_res.start = memblock_phys_alloc_range(crash_size, start 173 arch/sh/kernel/machine_kexec.c if (!crashk_res.start) { start 178 arch/sh/kernel/machine_kexec.c ret = memblock_reserve(crashk_res.start, crash_size); start 186 arch/sh/kernel/machine_kexec.c crashk_res.end = crashk_res.start + crash_size - 1; start 199 arch/sh/kernel/machine_kexec.c (unsigned long)(crashk_res.start), start 205 arch/sh/kernel/machine_kexec.c crashk_res.start = crashk_res.end = 0; start 347 arch/sh/kernel/perf_event.c .start = sh_pmu_start, start 123 arch/sh/kernel/setup.c unsigned long start, end; start 132 arch/sh/kernel/setup.c start = INITRD_START + __MEMORY_START; start 133 arch/sh/kernel/setup.c end = start + INITRD_SIZE; start 135 arch/sh/kernel/setup.c if (unlikely(end <= start)) start 137 arch/sh/kernel/setup.c if (unlikely(start & ~PAGE_MASK)) { start 142 arch/sh/kernel/setup.c if (unlikely(start < __MEMORY_START)) { start 144 arch/sh/kernel/setup.c start, __MEMORY_START); start 165 arch/sh/kernel/setup.c initrd_start = (unsigned long)__va(start); start 200 arch/sh/kernel/setup.c unsigned long start, end; start 204 arch/sh/kernel/setup.c start = start_pfn << PAGE_SHIFT; start 208 arch/sh/kernel/setup.c res->start = start; start 235 arch/sh/kernel/setup.c pmb_bolt_mapping((unsigned long)__va(start), start, end - start, start 303 
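The arch/sh/kernel/setup.c hits above (lines 123-165) are the initrd sanity checks: reject a wrapped or empty range, a start that is not page aligned, or a start below the base of memory. A standalone sketch of those checks with stand-in constants and invented messages; only the order and shape of the tests follows the referenced code:

#include <stdio.h>

#define PAGE_SIZE         4096UL
#define PAGE_MASK         (~(PAGE_SIZE - 1))
#define DEMO_MEMORY_START 0x08000000UL   /* stand-in for __MEMORY_START */

static int check_initrd(unsigned long start, unsigned long size)
{
        unsigned long end = start + size;

        if (end <= start) {
                printf("empty or wrapped range, ignoring\n");
                return -1;
        }
        if (start & ~PAGE_MASK) {
                printf("start %#lx is not page aligned, ignoring\n", start);
                return -1;
        }
        if (start < DEMO_MEMORY_START) {
                printf("start %#lx is below RAM base %#lx, ignoring\n",
                       start, DEMO_MEMORY_START);
                return -1;
        }
        printf("accepting initrd at [%#lx, %#lx)\n", start, end);
        return 0;
}

int main(void)
{
        check_initrd(0x08400000UL, 0x00200000UL);   /* fine      */
        check_initrd(0x08400123UL, 0x00200000UL);   /* unaligned */
        return 0;
}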
arch/sh/kernel/setup.c code_resource.start = virt_to_phys(_text); start 305 arch/sh/kernel/setup.c data_resource.start = virt_to_phys(_etext); start 307 arch/sh/kernel/setup.c bss_resource.start = virt_to_phys(__bss_start); start 391 arch/sh/kernel/smp.c unsigned long start, unsigned long end) start 400 arch/sh/kernel/smp.c fd.addr1 = start; start 409 arch/sh/kernel/smp.c local_flush_tlb_range(vma, start, end); start 420 arch/sh/kernel/smp.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 424 arch/sh/kernel/smp.c fd.addr1 = start; start 18 arch/sh/mm/cache-sh2.c static void sh2__flush_wback_region(void *start, int size) start 23 arch/sh/mm/cache-sh2.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 24 arch/sh/mm/cache-sh2.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 39 arch/sh/mm/cache-sh2.c static void sh2__flush_purge_region(void *start, int size) start 44 arch/sh/mm/cache-sh2.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 45 arch/sh/mm/cache-sh2.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 53 arch/sh/mm/cache-sh2.c static void sh2__flush_invalidate_region(void *start, int size) start 75 arch/sh/mm/cache-sh2.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 76 arch/sh/mm/cache-sh2.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 49 arch/sh/mm/cache-sh2a.c static void sh2a__flush_wback_region(void *start, int size) start 57 arch/sh/mm/cache-sh2a.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 58 arch/sh/mm/cache-sh2a.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 91 arch/sh/mm/cache-sh2a.c static void sh2a__flush_purge_region(void *start, int size) start 97 arch/sh/mm/cache-sh2a.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 98 arch/sh/mm/cache-sh2a.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 121 arch/sh/mm/cache-sh2a.c static void sh2a__flush_invalidate_region(void *start, int size) start 127 arch/sh/mm/cache-sh2a.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 128 arch/sh/mm/cache-sh2a.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 153 arch/sh/mm/cache-sh2a.c unsigned long start, end; start 157 arch/sh/mm/cache-sh2a.c start = data->addr1 & ~(L1_CACHE_BYTES-1); start 161 arch/sh/mm/cache-sh2a.c sh2a__flush_wback_region((void *)start, end-start); start 169 arch/sh/mm/cache-sh2a.c if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { start 173 arch/sh/mm/cache-sh2a.c for (v = start; v < end; v += L1_CACHE_BYTES) start 34 arch/sh/mm/cache-sh3.c static void sh3__flush_wback_region(void *start, int size) start 40 arch/sh/mm/cache-sh3.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 41 arch/sh/mm/cache-sh3.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 73 arch/sh/mm/cache-sh3.c static void sh3__flush_purge_region(void *start, int size) start 78 arch/sh/mm/cache-sh3.c begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); start 79 arch/sh/mm/cache-sh3.c end = ((unsigned long)start + size + L1_CACHE_BYTES-1) start 43 arch/sh/mm/cache-sh4.c unsigned long start, end; start 47 arch/sh/mm/cache-sh4.c start = data->addr1; start 51 arch/sh/mm/cache-sh4.c if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { start 60 arch/sh/mm/cache-sh4.c start &= ~(L1_CACHE_BYTES-1); start 67 arch/sh/mm/cache-sh4.c for (v = start; v < end; v += L1_CACHE_BYTES) { start 89 arch/sh/mm/cache-sh4.c static inline void flush_cache_one(unsigned long start, unsigned long phys) start 98 
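The cache-sh2.c, cache-sh2a.c and cache-sh3.c hits above all repeat the same two lines of arithmetic: round the region start down and the region end up to L1_CACHE_BYTES before walking it line by line. A standalone demo of just that arithmetic, with an arbitrary line size and a sample region; the real loops write to cache address-array registers instead of printing:

#include <stdio.h>

#define L1_CACHE_BYTES 32   /* illustrative; the real value is per-CPU config */

int main(void)
{
        void *start = (void *)0x10007UL;   /* unaligned region start (made up) */
        int size = 100;                    /* region length in bytes           */
        unsigned long begin, end, v;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
        end   = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
                & ~(L1_CACHE_BYTES - 1);

        printf("region [%#lx, %#lx) covers these cache lines:\n", begin, end);
        for (v = begin; v < end; v += L1_CACHE_BYTES)
                printf("  line at %#lx\n", v);   /* real code flushes here */
        return 0;
}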
arch/sh/mm/cache-sh4.c (start < CACHE_OC_ADDRESS_ARRAY)) start 102 arch/sh/mm/cache-sh4.c __flush_cache_one(start, phys, exec_offset); start 280 arch/sh/mm/cache-sh4.c unsigned long start, end; start 283 arch/sh/mm/cache-sh4.c start = data->addr1; start 71 arch/sh/mm/cache-sh5.c static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) start 77 arch/sh/mm/cache-sh5.c aligned_start = (unsigned long long)(signed long long)(signed long) start; start 133 arch/sh/mm/cache-sh5.c unsigned long start, unsigned long end) start 154 arch/sh/mm/cache-sh5.c n_pages = ((end - start) >> PAGE_SHIFT); start 173 arch/sh/mm/cache-sh5.c aligned_start = start & PAGE_MASK; start 204 arch/sh/mm/cache-sh5.c static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) start 222 arch/sh/mm/cache-sh5.c aligned_start = L1_CACHE_ALIGN(start); start 468 arch/sh/mm/cache-sh5.c unsigned long start, unsigned long end) start 470 arch/sh/mm/cache-sh5.c int n_pages = ((end - start) >> PAGE_SHIFT); start 472 arch/sh/mm/cache-sh5.c if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) { start 476 arch/sh/mm/cache-sh5.c start &= PAGE_MASK; /* should already be so */ start 478 arch/sh/mm/cache-sh5.c sh64_dcache_purge_user_pages(mm, start, end); start 529 arch/sh/mm/cache-sh5.c unsigned long start, end; start 532 arch/sh/mm/cache-sh5.c start = data->addr1; start 535 arch/sh/mm/cache-sh5.c sh64_dcache_purge_user_range(vma->vm_mm, start, end); start 536 arch/sh/mm/cache-sh5.c sh64_icache_inv_user_page_range(vma->vm_mm, start, end); start 581 arch/sh/mm/cache-sh5.c unsigned long start, end; start 583 arch/sh/mm/cache-sh5.c start = data->addr1; start 586 arch/sh/mm/cache-sh5.c __flush_purge_region((void *)start, end); start 588 arch/sh/mm/cache-sh5.c sh64_icache_inv_kernel_range(start, end); start 70 arch/sh/mm/cache-sh7705.c unsigned long start, end; start 72 arch/sh/mm/cache-sh7705.c start = data->addr1; start 75 arch/sh/mm/cache-sh7705.c __flush_wback_region((void *)start, end - start); start 28 arch/sh/mm/cache.c void (*__flush_wback_region)(void *start, int size); start 30 arch/sh/mm/cache.c void (*__flush_purge_region)(void *start, int size); start 32 arch/sh/mm/cache.c void (*__flush_invalidate_region)(void *start, int size); start 35 arch/sh/mm/cache.c static inline void noop__flush_region(void *start, int size) start 205 arch/sh/mm/cache.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start, start 211 arch/sh/mm/cache.c data.addr1 = start; start 224 arch/sh/mm/cache.c void flush_icache_range(unsigned long start, unsigned long end) start 229 arch/sh/mm/cache.c data.addr1 = start; start 64 arch/sh/mm/consistent.c r->start = dma_handle; start 65 arch/sh/mm/consistent.c r->end = r->start + memsize - 1; start 14 arch/sh/mm/flush-sh4.c static void sh4__flush_wback_region(void *start, int size) start 18 arch/sh/mm/flush-sh4.c aligned_start = register_align(start); start 48 arch/sh/mm/flush-sh4.c static void sh4__flush_purge_region(void *start, int size) start 52 arch/sh/mm/flush-sh4.c aligned_start = register_align(start); start 78 arch/sh/mm/flush-sh4.c static void sh4__flush_invalidate_region(void *start, int size) start 82 arch/sh/mm/flush-sh4.c aligned_start = register_align(start); start 164 arch/sh/mm/init.c void __init page_table_range_init(unsigned long start, unsigned long end, start 174 arch/sh/mm/init.c vaddr = start; start 249 arch/sh/mm/init.c u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET; start 263 arch/sh/mm/init.c memblock_reserve(start, 
(PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start); start 408 arch/sh/mm/init.c int arch_add_memory(int nid, u64 start, u64 size, start 411 arch/sh/mm/init.c unsigned long start_pfn = PFN_DOWN(start); start 432 arch/sh/mm/init.c void arch_remove_memory(int nid, u64 start, u64 size, start 435 arch/sh/mm/init.c unsigned long start_pfn = PFN_DOWN(start); start 48 arch/sh/mm/nommu.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 64 arch/sh/mm/nommu.c void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 92 arch/sh/mm/nommu.c void __init page_table_range_init(unsigned long start, unsigned long end, start 26 arch/sh/mm/numa.c void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) start 33 arch/sh/mm/numa.c start_pfn = PFN_DOWN(start); start 36 arch/sh/mm/numa.c pmb_bolt_mapping((unsigned long)__va(start), start, end - start, start 39 arch/sh/mm/numa.c memblock_add(start, end - start); start 39 arch/sh/mm/tlbflush_32.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 50 arch/sh/mm/tlbflush_32.c size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; start 60 arch/sh/mm/tlbflush_32.c start &= PAGE_MASK; start 67 arch/sh/mm/tlbflush_32.c while (start < end) { start 68 arch/sh/mm/tlbflush_32.c local_flush_tlb_one(asid, start); start 69 arch/sh/mm/tlbflush_32.c start += PAGE_SIZE; start 78 arch/sh/mm/tlbflush_32.c void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 85 arch/sh/mm/tlbflush_32.c size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; start 93 arch/sh/mm/tlbflush_32.c start &= PAGE_MASK; start 97 arch/sh/mm/tlbflush_32.c while (start < end) { start 98 arch/sh/mm/tlbflush_32.c local_flush_tlb_one(asid, start); start 99 arch/sh/mm/tlbflush_32.c start += PAGE_SIZE; start 79 arch/sh/mm/tlbflush_64.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 94 arch/sh/mm/tlbflush_64.c start &= PAGE_MASK; start 108 arch/sh/mm/tlbflush_64.c if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) start 121 arch/sh/mm/tlbflush_64.c if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) start 163 arch/sh/mm/tlbflush_64.c void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 100 arch/sparc/boot/piggyback.c static int get_start_end(const char *filename, unsigned int *start, start 106 arch/sparc/boot/piggyback.c *start = 0; start 113 arch/sparc/boot/piggyback.c *start = strtoul(buffer, NULL, 16); start 119 arch/sparc/boot/piggyback.c if (*start == 0 || *end == 0) start 177 arch/sparc/boot/piggyback.c unsigned int i, start, end; start 189 arch/sparc/boot/piggyback.c if (!get_start_end(argv[3], &start, &end)) { start 238 arch/sparc/boot/piggyback.c st4(buffer, align(end + 32 + 8191) - (start & ~0x3fffffUL) + start 249 arch/sparc/boot/piggyback.c if (lseek(image, AOUT_TEXT_OFFSET - start + align(end + 32), 0) < 0) start 13 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_range(vma,start,end) \ start 14 arch/sparc/include/asm/cacheflush_32.h sparc32_cachetlb_ops->cache_range(vma, start, end) start 17 arch/sparc/include/asm/cacheflush_32.h #define flush_icache_range(start, end) do { } while (0) start 47 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_vmap(start, end) flush_cache_all() start 48 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_vunmap(start, end) flush_cache_all() start 24 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_range(vma, start, end) \ start 
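The arch/sh/mm/tlbflush_32.c hits above outline the page-by-page flush loop: compute the page count, mask the start down to a page boundary, then flush one page at a time. A sketch of that loop shape, with the ASID handling and the "too many pages, flush everything" cut-off left out and local_flush_tlb_one() stubbed to a printf:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
        printf("flush asid=%lu page=%#lx\n", asid, page);   /* stand-in body */
}

static void demo_flush_range(unsigned long asid, unsigned long start,
                             unsigned long end)
{
        unsigned long size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

        printf("%lu page(s) to flush\n", size);
        start &= PAGE_MASK;
        while (start < end) {
                local_flush_tlb_one(asid, start);
                start += PAGE_SIZE;
        }
}

int main(void)
{
        demo_flush_range(1, 0x00401234UL, 0x00404000UL);
        return 0;
}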
34 arch/sparc/include/asm/cacheflush_64.h void flush_icache_range(unsigned long start, unsigned long end); start 47 arch/sparc/include/asm/cacheflush_64.h void __flush_dcache_range(unsigned long start, unsigned long end); start 75 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_vmap(start, end) do { } while (0) start 76 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_vunmap(start, end) do { } while (0) start 318 arch/sparc/include/asm/floppy_32.h r.start = fd_regs[0].phys_addr; start 597 arch/sparc/include/asm/floppy_64.h auxio_reg = (void __iomem *) op->resource[2].start; start 606 arch/sparc/include/asm/floppy_64.h op->resource[1].start; start 620 arch/sparc/include/asm/floppy_64.h sun_fdc = (struct sun_flpy_controller *) op->resource[0].start; start 664 arch/sparc/include/asm/floppy_64.h config = ecpp_op->resource[1].start; start 726 arch/sparc/include/asm/floppy_64.h (op->resource[0].start + start 14 arch/sparc/include/asm/iommu-common.h unsigned long start; start 106 arch/sparc/include/asm/iommu_32.h unsigned long start; /* First managed virtual address */ start 170 arch/sparc/include/asm/leon_amba.h unsigned int start, irq, bus_id; start 176 arch/sparc/include/asm/leon_amba.h unsigned int start[4], irq, bus_id; start 101 arch/sparc/include/asm/mmu_64.h unsigned long start; /* Start address for this tag storage */ start 111 arch/sparc/include/asm/parport.h unsigned long base = op->resource[0].start; start 112 arch/sparc/include/asm/parport.h unsigned long config = op->resource[1].start; start 113 arch/sparc/include/asm/parport.h unsigned long d_base = op->resource[2].start; start 206 arch/sparc/include/asm/parport.h unsigned long d_base = op->resource[2].start; start 1081 arch/sparc/include/asm/pgtable_64.h static inline unsigned long __untagged_addr(unsigned long start) start 1084 arch/sparc/include/asm/pgtable_64.h long addr = start; start 1099 arch/sparc/include/asm/pgtable_64.h return start; start 11 arch/sparc/include/asm/tlbflush_32.h #define flush_tlb_range(vma, start, end) \ start 12 arch/sparc/include/asm/tlbflush_32.h sparc32_cachetlb_ops->tlb_range(vma, start, end) start 19 arch/sparc/include/asm/tlbflush_32.h static inline void flush_tlb_kernel_range(unsigned long start, start 19 arch/sparc/include/asm/tlbflush_64.h void flush_tsb_kernel_range(unsigned long start, unsigned long end); start 36 arch/sparc/include/asm/tlbflush_64.h unsigned long start, unsigned long end) start 40 arch/sparc/include/asm/tlbflush_64.h void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 52 arch/sparc/include/asm/tlbflush_64.h void __flush_tlb_kernel_range(unsigned long start, unsigned long end); start 63 arch/sparc/include/asm/tlbflush_64.h void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); start 55 arch/sparc/include/asm/vvar.h unsigned int start) start 58 arch/sparc/include/asm/vvar.h return unlikely(s->seq != start); start 139 arch/sparc/kernel/adi_64.c if ((addr >= tag_desc->start) && start 188 arch/sparc/kernel/adi_64.c if ((addr >= tag_desc->start) && start 194 arch/sparc/kernel/adi_64.c if ((tag_desc->start > end_addr) && start 195 arch/sparc/kernel/adi_64.c (tag_desc->start < hole_end)) start 196 arch/sparc/kernel/adi_64.c hole_end = tag_desc->start; start 290 arch/sparc/kernel/adi_64.c tag_desc->start = addr; start 307 arch/sparc/kernel/adi_64.c tag_desc->start = tag_desc->end = 0; start 322 arch/sparc/kernel/adi_64.c ((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize()))) start 68 
arch/sparc/kernel/auxio_32.c r.start = auxregs[0].phys_addr; start 132 arch/sparc/kernel/auxio_32.c r.start = regs.phys_addr; start 102 arch/sparc/kernel/central.c p->leds_resource.start = (unsigned long) start 104 arch/sparc/kernel/central.c p->leds_resource.end = p->leds_resource.start; start 196 arch/sparc/kernel/central.c p->leds_resource.start = (unsigned long) start 198 arch/sparc/kernel/central.c p->leds_resource.end = p->leds_resource.start; start 433 arch/sparc/kernel/cpu.c .start =c_start, start 59 arch/sparc/kernel/iommu-common.c unsigned int start, i; start 71 arch/sparc/kernel/iommu-common.c start = 0; start 83 arch/sparc/kernel/iommu-common.c iommu->pools[i].start = start; start 84 arch/sparc/kernel/iommu-common.c iommu->pools[i].hint = start; start 85 arch/sparc/kernel/iommu-common.c start += iommu->poolsize; /* start for next pool */ start 86 arch/sparc/kernel/iommu-common.c iommu->pools[i].end = start - 1; start 92 arch/sparc/kernel/iommu-common.c p->start = start; start 93 arch/sparc/kernel/iommu-common.c p->hint = p->start; start 105 arch/sparc/kernel/iommu-common.c unsigned long n, end, start, limit, boundary_size; start 137 arch/sparc/kernel/iommu-common.c (*handle >= pool->start) && (*handle < pool->end)) start 138 arch/sparc/kernel/iommu-common.c start = *handle; start 140 arch/sparc/kernel/iommu-common.c start = pool->hint; start 150 arch/sparc/kernel/iommu-common.c if (start >= limit) start 151 arch/sparc/kernel/iommu-common.c start = pool->start; start 159 arch/sparc/kernel/iommu-common.c if ((start & mask) >= limit || pass > 0) { start 163 arch/sparc/kernel/iommu-common.c start = pool->start; start 165 arch/sparc/kernel/iommu-common.c start &= mask; start 185 arch/sparc/kernel/iommu-common.c n = iommu_area_alloc(iommu->map, limit, start, npages, shift, start 190 arch/sparc/kernel/iommu-common.c pool->hint = pool->start; start 199 arch/sparc/kernel/iommu-common.c pool->hint = pool->start; start 231 arch/sparc/kernel/iommu-common.c unsigned long largepool_start = tbl->large_pool.start; start 75 arch/sparc/kernel/ioport.c .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1 start 79 arch/sparc/kernel/ioport.c .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1 start 163 arch/sparc/kernel/ioport.c res->start + offset, start 222 arch/sparc/kernel/ioport.c sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) { start 230 arch/sparc/kernel/ioport.c srmmu_mapiorange(bus, pa, res->start, resource_size(res)); start 232 arch/sparc/kernel/ioport.c return (void __iomem *)(unsigned long)(res->start + offset); start 244 arch/sparc/kernel/ioport.c srmmu_unmapiorange(res->start, plen); start 257 arch/sparc/kernel/ioport.c if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start, start 264 arch/sparc/kernel/ioport.c return res->start; start 389 arch/sparc/kernel/ioport.c (unsigned long long)r->start, start 37 arch/sparc/kernel/leon_pci.c info->io_space.start - 0x1000); start 573 arch/sparc/kernel/leon_pci_grpci1.c priv->pci_area = ofdev->resource[1].start; start 575 arch/sparc/kernel/leon_pci_grpci1.c priv->pci_io = ofdev->resource[2].start; start 576 arch/sparc/kernel/leon_pci_grpci1.c priv->pci_conf = ofdev->resource[2].start + 0x10000; start 598 arch/sparc/kernel/leon_pci_grpci1.c priv->info.io_space.start = priv->pci_io_va + 0x1000; start 607 arch/sparc/kernel/leon_pci_grpci1.c priv->info.mem_space.start = priv->pci_area; start 625 arch/sparc/kernel/leon_pci_grpci1.c priv->info.busn.start = 0; start 758 
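The arch/sparc/kernel/iommu-common.c hits above show how the allocation map is cut into pools, each carrying a start, an allocation hint and an inclusive end. A standalone rendering of that partitioning with a made-up table size and pool count; the separate large pool set up by the real code is skipped:

#include <stdio.h>

struct demo_pool {
        unsigned long start;
        unsigned long hint;
        unsigned long end;
};

#define NPOOLS 4

int main(void)
{
        unsigned long num_entries = 1024;               /* made-up table size */
        unsigned long poolsize = num_entries / NPOOLS;
        struct demo_pool pools[NPOOLS];
        unsigned long start = 0;

        for (int i = 0; i < NPOOLS; i++) {
                pools[i].start = start;
                pools[i].hint = start;
                start += poolsize;          /* start for next pool */
                pools[i].end = start - 1;
        }

        for (int i = 0; i < NPOOLS; i++)
                printf("pool %d: [%lu, %lu], hint %lu\n",
                       i, pools[i].start, pools[i].end, pools[i].hint);
        return 0;
}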
arch/sparc/kernel/leon_pci_grpci2.c priv->pci_area = ofdev->resource[1].start; start 760 arch/sparc/kernel/leon_pci_grpci2.c priv->pci_io = ofdev->resource[2].start; start 761 arch/sparc/kernel/leon_pci_grpci2.c priv->pci_conf = ofdev->resource[2].start + 0x10000; start 784 arch/sparc/kernel/leon_pci_grpci2.c priv->info.io_space.start = priv->pci_io_va + 0x1000; start 794 arch/sparc/kernel/leon_pci_grpci2.c priv->info.mem_space.start = priv->pci_area; start 805 arch/sparc/kernel/leon_pci_grpci2.c priv->info.busn.start = 0; start 185 arch/sparc/kernel/mdesc.c unsigned long start; start 191 arch/sparc/kernel/mdesc.c start = __pa(hp); start 192 arch/sparc/kernel/mdesc.c memblock_free_late(start, alloc_size); start 332 arch/sparc/kernel/of_device_32.c r->start = result & 0xffffffff; start 20 arch/sparc/kernel/of_device_64.c unsigned long ret = res->start + offset; start 412 arch/sparc/kernel/of_device_64.c r->start = result; start 231 arch/sparc/kernel/pci.c op_res->start, op_res->end, i); start 242 arch/sparc/kernel/pci.c res->start = op_res->start; start 397 arch/sparc/kernel/pci.c region.start = (first << 21); start 405 arch/sparc/kernel/pci.c region.start = (first << 29); start 483 arch/sparc/kernel/pci.c u64 start; start 524 arch/sparc/kernel/pci.c region.start = start = GET_64BIT(ranges, 1); start 525 arch/sparc/kernel/pci.c region.end = region.start + size - 1; start 529 arch/sparc/kernel/pci.c flags, start, size); start 637 arch/sparc/kernel/pci.c region.start = 0xa0000UL; start 638 arch/sparc/kernel/pci.c region.end = region.start + 0x1ffffUL; start 672 arch/sparc/kernel/pci.c if (r->parent || !r->start || !r->flags) start 705 arch/sparc/kernel/pci.c pbm->busn.start = pbm->pci_first_busno; start 784 arch/sparc/kernel/pci.c vma->vm_pgoff = (pbm->io_space.start + start 787 arch/sparc/kernel/pci.c vma->vm_pgoff = (pbm->mem_space.start + start 858 arch/sparc/kernel/pci.c if ((rp->start <= user_paddr) && start 991 arch/sparc/kernel/pci.c const struct resource *rp, resource_size_t *start, start 1004 arch/sparc/kernel/pci.c *start = region.start; start 346 arch/sparc/kernel/pci_common.c rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; start 347 arch/sparc/kernel/pci_common.c rp->end = rp->start + (unsigned long) vdma[1] - 1UL; start 411 arch/sparc/kernel/pci_common.c pbm->io_space.start = a; start 420 arch/sparc/kernel/pci_common.c pbm->mem_space.start = a; start 429 arch/sparc/kernel/pci_common.c pbm->mem64_space.start = a; start 455 arch/sparc/kernel/pci_common.c if (pbm->mem64_space.start <= pbm->mem_space.end) start 456 arch/sparc/kernel/pci_common.c pbm->mem64_space.start = pbm->mem_space.end + 1; start 457 arch/sparc/kernel/pci_common.c if (pbm->mem64_space.start > pbm->mem64_space.end) start 720 arch/sparc/kernel/pci_sun4v.c for (i = pool->start; i <= pool->end; i++) { start 486 arch/sparc/kernel/pcic.c address = dev->resource[j].start; start 508 arch/sparc/kernel/pcic.c dev->resource[j].start = start 1582 arch/sparc/kernel/perf_event.c .start = sparc_pmu_start, start 45 arch/sparc/kernel/power.c op->dev.of_node, res->start); start 738 arch/sparc/kernel/prom_irqtrans.c imap = res->start + 0x00UL; start 739 arch/sparc/kernel/prom_irqtrans.c iclr = res->start + 0x10UL; start 126 arch/sparc/kernel/ptrace_64.c unsigned long start = __pa(kaddr); start 127 arch/sparc/kernel/ptrace_64.c unsigned long end = start + len; start 133 arch/sparc/kernel/ptrace_64.c for (; start < end; start += dcache_line_size) start 134 arch/sparc/kernel/ptrace_64.c spitfire_put_dcache_tag(start & 
0x3fe0, 0x0); start 136 arch/sparc/kernel/ptrace_64.c start &= ~(dcache_line_size - 1); start 137 arch/sparc/kernel/ptrace_64.c for (; start < end; start += dcache_line_size) start 142 arch/sparc/kernel/ptrace_64.c : "r" (start), start 148 arch/sparc/kernel/ptrace_64.c unsigned long start = (unsigned long) kaddr; start 149 arch/sparc/kernel/ptrace_64.c unsigned long end = start + len; start 154 arch/sparc/kernel/ptrace_64.c for (; start < end; start += icache_line_size) start 155 arch/sparc/kernel/ptrace_64.c flushi(start); start 252 arch/sparc/kernel/setup_32.c struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch; start 259 arch/sparc/kernel/setup_32.c while (start < end) { start 260 arch/sparc/kernel/setup_32.c unsigned long addr = start->addr; start 262 arch/sparc/kernel/setup_32.c *(unsigned int *)(addr) = start->insn; start 265 arch/sparc/kernel/setup_32.c start++; start 231 arch/sparc/kernel/setup_64.c void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start, start 234 arch/sparc/kernel/setup_64.c while (start < end) { start 235 arch/sparc/kernel/setup_64.c unsigned long addr = start->addr; start 237 arch/sparc/kernel/setup_64.c *(unsigned int *) (addr + 0) = start->insn; start 241 arch/sparc/kernel/setup_64.c start++; start 245 arch/sparc/kernel/setup_64.c void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, start 248 arch/sparc/kernel/setup_64.c while (start < end) { start 249 arch/sparc/kernel/setup_64.c unsigned long addr = start->addr; start 251 arch/sparc/kernel/setup_64.c *(unsigned int *) (addr + 0) = start->insns[0]; start 255 arch/sparc/kernel/setup_64.c *(unsigned int *) (addr + 4) = start->insns[1]; start 259 arch/sparc/kernel/setup_64.c start++; start 263 arch/sparc/kernel/setup_64.c void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start, start 266 arch/sparc/kernel/setup_64.c while (start < end) { start 267 arch/sparc/kernel/setup_64.c unsigned long addr = start->addr; start 269 arch/sparc/kernel/setup_64.c *(unsigned int *) (addr + 0) = start->insns[0]; start 273 arch/sparc/kernel/setup_64.c *(unsigned int *) (addr + 4) = start->insns[1]; start 277 arch/sparc/kernel/setup_64.c start++; start 1150 arch/sparc/kernel/smp_64.c void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 1152 arch/sparc/kernel/smp_64.c start &= PAGE_MASK; start 1154 arch/sparc/kernel/smp_64.c if (start != end) { start 1156 arch/sparc/kernel/smp_64.c 0, start, end); start 1158 arch/sparc/kernel/smp_64.c __flush_tlb_kernel_range(start, end); start 451 arch/sparc/kernel/sun4d_irq.c res.start = reg[1]; start 119 arch/sparc/kernel/sys_sparc_32.c SYSCALL_DEFINE5(sparc_remap_file_pages, unsigned long, start, unsigned long, size, start 126 arch/sparc/kernel/sys_sparc_32.c return sys_remap_file_pages(start, size, prot, start 26 arch/sparc/kernel/systbls.h long sys_sparc_remap_file_pages(unsigned long start, unsigned long size, start 449 arch/sparc/kernel/time_64.c op->dev.of_node, op->resource[0].start); start 460 arch/sparc/kernel/time_64.c r->start = op->resource[0].start; start 463 arch/sparc/kernel/time_64.c cmos_regs = op->resource[0].start; start 505 arch/sparc/kernel/time_64.c op->dev.of_node, op->resource[0].start); start 530 arch/sparc/kernel/time_64.c void __iomem *regs = (void __iomem *) pdev->resource[0].start; start 538 arch/sparc/kernel/time_64.c void __iomem *regs = (void __iomem *) pdev->resource[0].start; start 569 arch/sparc/kernel/time_64.c dp, op->resource[0].start); start 10 arch/sparc/mm/extable.c void 
sort_extable(struct exception_table_entry *start, start 421 arch/sparc/mm/hugetlbpage.c unsigned long start; start 423 arch/sparc/mm/hugetlbpage.c start = addr; start 435 arch/sparc/mm/hugetlbpage.c start &= PUD_MASK; start 436 arch/sparc/mm/hugetlbpage.c if (start < floor) start 446 arch/sparc/mm/hugetlbpage.c pmd = pmd_offset(pud, start); start 448 arch/sparc/mm/hugetlbpage.c pmd_free_tlb(tlb, pmd, start); start 458 arch/sparc/mm/hugetlbpage.c unsigned long start; start 460 arch/sparc/mm/hugetlbpage.c start = addr; start 473 arch/sparc/mm/hugetlbpage.c start &= PGDIR_MASK; start 474 arch/sparc/mm/hugetlbpage.c if (start < floor) start 484 arch/sparc/mm/hugetlbpage.c pud = pud_offset(pgd, start); start 486 arch/sparc/mm/hugetlbpage.c pud_free_tlb(tlb, pud, start); start 221 arch/sparc/mm/init_32.c unsigned long start, end; start 223 arch/sparc/mm/init_32.c start = sp_banks[i].base_addr; start 224 arch/sparc/mm/init_32.c end = start + sp_banks[i].num_bytes; start 226 arch/sparc/mm/init_32.c while (start < end) { start 227 arch/sparc/mm/init_32.c set_bit(start >> 20, sparc_valid_addr_bitmap); start 228 arch/sparc/mm/init_32.c start += PAGE_SIZE; start 517 arch/sparc/mm/init_64.c void __kprobes flush_icache_range(unsigned long start, unsigned long end) start 526 arch/sparc/mm/init_64.c for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { start 746 arch/sparc/mm/init_64.c void __flush_dcache_range(unsigned long start, unsigned long end) start 753 arch/sparc/mm/init_64.c for (va = start; va < end; va += 32) { start 759 arch/sparc/mm/init_64.c start = __pa(start); start 761 arch/sparc/mm/init_64.c for (va = start; va < end; va += 32) start 975 arch/sparc/mm/init_64.c static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid) start 980 arch/sparc/mm/init_64.c for ( ; start < end; start += PAGE_SIZE) { start 984 arch/sparc/mm/init_64.c if ((start & p->mask) == p->match) { start 994 arch/sparc/mm/init_64.c start); start 1003 arch/sparc/mm/init_64.c return start > end ? 
end : start; start 1006 arch/sparc/mm/init_64.c static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) start 1013 arch/sparc/mm/init_64.c return memblock_nid_range_sun4u(start, end, nid); start 1015 arch/sparc/mm/init_64.c mblock = addr_to_mblock(start); start 1018 arch/sparc/mm/init_64.c start); start 1025 arch/sparc/mm/init_64.c pa_start = start + mblock->offset; start 1058 arch/sparc/mm/init_64.c start); start 1232 arch/sparc/mm/init_64.c unsigned long start, end; start 1234 arch/sparc/mm/init_64.c start = reg->base; start 1235 arch/sparc/mm/init_64.c end = start + size; start 1236 arch/sparc/mm/init_64.c while (start < end) { start 1240 arch/sparc/mm/init_64.c this_end = memblock_nid_range(start, end, &nid); start 1244 arch/sparc/mm/init_64.c nid, start, this_end); start 1246 arch/sparc/mm/init_64.c memblock_set_node(start, this_end - start, start 1250 arch/sparc/mm/init_64.c start = this_end; start 2093 arch/sparc/mm/init_64.c static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) start 2100 arch/sparc/mm/init_64.c while (start < end) { start 2101 arch/sparc/mm/init_64.c unsigned int *ia = (unsigned int *)(unsigned long)*start; start 2115 arch/sparc/mm/init_64.c start++; start 2484 arch/sparc/mm/init_64.c unsigned long start, end; start 2486 arch/sparc/mm/init_64.c start = pavail[i].phys_addr; start 2487 arch/sparc/mm/init_64.c end = start + pavail[i].reg_size; start 2489 arch/sparc/mm/init_64.c if (paddr >= start && paddr < end) start 2640 arch/sparc/mm/init_64.c void vmemmap_free(unsigned long start, unsigned long end, start 3063 arch/sparc/mm/init_64.c code_resource.start = compute_kern_paddr(_text); start 3065 arch/sparc/mm/init_64.c data_resource.start = compute_kern_paddr(_etext); start 3067 arch/sparc/mm/init_64.c bss_resource.start = compute_kern_paddr(__bss_start); start 3087 arch/sparc/mm/init_64.c res->start = pavail[i].phys_addr; start 3111 arch/sparc/mm/init_64.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 3113 arch/sparc/mm/init_64.c if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { start 3114 arch/sparc/mm/init_64.c if (start < LOW_OBP_ADDRESS) { start 3115 arch/sparc/mm/init_64.c flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); start 3116 arch/sparc/mm/init_64.c do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); start 3123 arch/sparc/mm/init_64.c flush_tsb_kernel_range(start, end); start 3124 arch/sparc/mm/init_64.c do_flush_tlb_kernel_range(start, end); start 87 arch/sparc/mm/iommu.c iommu->start = IOMMU_START; start 154 arch/sparc/mm/iommu.c unsigned long start; start 157 arch/sparc/mm/iommu.c start = (unsigned long)iopte; start 158 arch/sparc/mm/iommu.c end = PAGE_ALIGN(start + niopte*sizeof(iopte_t)); start 159 arch/sparc/mm/iommu.c start &= PAGE_MASK; start 161 arch/sparc/mm/iommu.c while(start < end) { start 162 arch/sparc/mm/iommu.c viking_mxcc_flush_page(start); start 163 arch/sparc/mm/iommu.c start += PAGE_SIZE; start 166 arch/sparc/mm/iommu.c while(start < end) { start 167 arch/sparc/mm/iommu.c viking_flush_page(start); start 168 arch/sparc/mm/iommu.c start += PAGE_SIZE; start 171 arch/sparc/mm/iommu.c while(start < end) { start 172 arch/sparc/mm/iommu.c __flush_page_to_ram(start); start 173 arch/sparc/mm/iommu.c start += PAGE_SIZE; start 211 arch/sparc/mm/iommu.c busa0 = iommu->start + (ioptex << PAGE_SHIFT); start 281 arch/sparc/mm/iommu.c unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT; start 284 arch/sparc/mm/iommu.c BUG_ON(busa < iommu->start); start 383 
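The arch/sparc/kernel/setup_64.c hits a little above (lines 231-241) show the instruction-patching loop: walk a table of {address, replacement instruction} entries and store each word at its site. A userspace sketch of that loop, patching into an ordinary array instead of kernel text and skipping the flushes the real patcher performs:

#include <stdio.h>

struct demo_patch_entry {
        unsigned int *addr;     /* site to patch (the real entry stores an address value) */
        unsigned int insn;      /* replacement word                                        */
};

static void demo_patch_1insn_range(struct demo_patch_entry *start,
                                   struct demo_patch_entry *end)
{
        while (start < end) {
                *start->addr = start->insn;
                start++;
        }
}

int main(void)
{
        unsigned int text[3] = { 0x01000000, 0x01000000, 0x01000000 };
        struct demo_patch_entry table[] = {
                { &text[0], 0xdeadbeef },
                { &text[2], 0xcafef00d },
        };

        demo_patch_1insn_range(table, table + 2);
        for (int i = 0; i < 3; i++)
                printf("text[%d] = %#x\n", i, text[i]);
        return 0;
}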
arch/sparc/mm/iommu.c *dma_handle = iommu->start + (ioptex << PAGE_SHIFT); start 397 arch/sparc/mm/iommu.c int ioptex = (busa - iommu->start) >> PAGE_SHIFT; start 288 arch/sparc/mm/leon_mm.c unsigned long start, start 306 arch/sparc/mm/leon_mm.c unsigned long start, start 254 arch/sparc/mm/srmmu.c static void srmmu_early_allocate_ptable_skeleton(unsigned long start, start 578 arch/sparc/mm/srmmu.c extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); start 585 arch/sparc/mm/srmmu.c extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); start 593 arch/sparc/mm/srmmu.c unsigned long start, unsigned long end); start 601 arch/sparc/mm/srmmu.c unsigned long start, unsigned long end); start 644 arch/sparc/mm/srmmu.c extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start, start 654 arch/sparc/mm/srmmu.c extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 660 arch/sparc/mm/srmmu.c extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 668 arch/sparc/mm/srmmu.c extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); start 675 arch/sparc/mm/srmmu.c extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); start 692 arch/sparc/mm/srmmu.c static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, start 699 arch/sparc/mm/srmmu.c while (start < end) { start 700 arch/sparc/mm/srmmu.c pgdp = pgd_offset_k(start); start 709 arch/sparc/mm/srmmu.c pmdp = pmd_offset(__nocache_fix(pgdp), start); start 717 arch/sparc/mm/srmmu.c if (start > (0xffffffffUL - PMD_SIZE)) start 719 arch/sparc/mm/srmmu.c start = (start + PMD_SIZE) & PMD_MASK; start 723 arch/sparc/mm/srmmu.c static void __init srmmu_allocate_ptable_skeleton(unsigned long start, start 730 arch/sparc/mm/srmmu.c while (start < end) { start 731 arch/sparc/mm/srmmu.c pgdp = pgd_offset_k(start); start 739 arch/sparc/mm/srmmu.c pmdp = pmd_offset(pgdp, start); start 748 arch/sparc/mm/srmmu.c if (start > (0xffffffffUL - PMD_SIZE)) start 750 arch/sparc/mm/srmmu.c start = (start + PMD_SIZE) & PMD_MASK; start 776 arch/sparc/mm/srmmu.c static void __init srmmu_inherit_prom_mappings(unsigned long start, start 786 arch/sparc/mm/srmmu.c while (start <= end) { start 787 arch/sparc/mm/srmmu.c if (start == 0) start 789 arch/sparc/mm/srmmu.c if (start == 0xfef00000) start 790 arch/sparc/mm/srmmu.c start = KADB_DEBUGGER_BEGVM; start 791 arch/sparc/mm/srmmu.c probed = srmmu_probe(start); start 794 arch/sparc/mm/srmmu.c start += PAGE_SIZE; start 800 arch/sparc/mm/srmmu.c addr = start - PAGE_SIZE; start 802 arch/sparc/mm/srmmu.c if (!(start & ~(SRMMU_REAL_PMD_MASK))) { start 807 arch/sparc/mm/srmmu.c if (!(start & ~(SRMMU_PGDIR_MASK))) { start 812 arch/sparc/mm/srmmu.c pgdp = pgd_offset_k(start); start 815 arch/sparc/mm/srmmu.c start += SRMMU_PGDIR_SIZE; start 826 arch/sparc/mm/srmmu.c pmdp = pmd_offset(__nocache_fix(pgdp), start); start 841 arch/sparc/mm/srmmu.c x = (start >> PMD_SHIFT) & 15; start 844 arch/sparc/mm/srmmu.c start += SRMMU_REAL_PMD_SIZE; start 847 arch/sparc/mm/srmmu.c ptep = pte_offset_kernel(__nocache_fix(pmdp), start); start 849 arch/sparc/mm/srmmu.c start += PAGE_SIZE; start 914 arch/sparc/mm/srmmu.c sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. 
*/ start 962 arch/sparc/mm/srmmu.c srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END); start 1263 arch/sparc/mm/srmmu.c static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 1314 arch/sparc/mm/srmmu.c static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 1702 arch/sparc/mm/srmmu.c unsigned long start, start 1713 arch/sparc/mm/srmmu.c (unsigned long) vma, start, end); start 1714 arch/sparc/mm/srmmu.c local_ops->cache_range(vma, start, end); start 1719 arch/sparc/mm/srmmu.c unsigned long start, start 1730 arch/sparc/mm/srmmu.c (unsigned long) vma, start, end); start 1731 arch/sparc/mm/srmmu.c local_ops->tlb_range(vma, start, end); start 33 arch/sparc/mm/tsb.c static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end) start 42 arch/sparc/mm/tsb.c if (match >= start && match < end) start 52 arch/sparc/mm/tsb.c void flush_tsb_kernel_range(unsigned long start, unsigned long end) start 56 arch/sparc/mm/tsb.c if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) start 57 arch/sparc/mm/tsb.c return flush_tsb_kernel_range_scan(start, end); start 59 arch/sparc/mm/tsb.c for (v = start; v < end; v += PAGE_SIZE) { start 39 arch/sparc/net/bpf_jit_comp_64.c unsigned long start = (unsigned long) start_; start 42 arch/sparc/net/bpf_jit_comp_64.c start &= ~7UL; start 44 arch/sparc/net/bpf_jit_comp_64.c while (start < end) { start 45 arch/sparc/net/bpf_jit_comp_64.c flushi(start); start 46 arch/sparc/net/bpf_jit_comp_64.c start += 32; start 64 arch/sparc/oprofile/init.c ops->start = timer_start; start 352 arch/sparc/vdso/vma.c static unsigned long vdso_addr(unsigned long start, unsigned int len) start 358 arch/sparc/vdso/vma.c return start + (offset << PAGE_SHIFT); start 496 arch/um/drivers/mconsole_kern.c int err, start, end, n; start 509 arch/um/drivers/mconsole_kern.c n = (*dev->id)(&ptr, &start, &end); start 514 arch/um/drivers/mconsole_kern.c else if ((n < start) || (n > end)) { start 516 arch/um/drivers/mconsole_kern.c "%d and %d", start, end); start 8 arch/um/drivers/slip_common.c int i, n, size, start; start 31 arch/um/drivers/slip_common.c start = slip->pos; start 33 arch/um/drivers/slip_common.c size = slip_unesc(slip->ibuf[start + i], slip->ibuf,&slip->pos, start 37 arch/um/drivers/slip_common.c memmove(slip->ibuf, &slip->ibuf[start+i+1], start 1504 arch/um/drivers/ubd_kern.c int n, nsectors, start, end, bit; start 1516 arch/um/drivers/ubd_kern.c start = 0; start 1518 arch/um/drivers/ubd_kern.c bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); start 1519 arch/um/drivers/ubd_kern.c end = start; start 1526 arch/um/drivers/ubd_kern.c start * req->sectorsize; start 1527 arch/um/drivers/ubd_kern.c len = (end - start) * req->sectorsize; start 1529 arch/um/drivers/ubd_kern.c buf = &req->buffer[start * req->sectorsize]; start 1566 arch/um/drivers/ubd_kern.c start = end; start 1567 arch/um/drivers/ubd_kern.c } while(start < nsectors); start 695 arch/um/drivers/vector_kern.c char *start = str; start 707 arch/um/drivers/vector_kern.c err = kstrtouint(start, 0, &n); start 25 arch/um/include/asm/mmu_context.h unsigned long start, unsigned long end) start 24 arch/um/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 28 arch/um/include/asm/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 55 arch/um/include/shared/mem_user.h extern void 
setup_physmem(unsigned long start, unsigned long usable, start 95 arch/um/kernel/mem.c static void __init fixrange_init(unsigned long start, unsigned long end, start 104 arch/um/kernel/mem.c vaddr = start; start 79 arch/um/kernel/physmem.c void __init setup_physmem(unsigned long start, unsigned long reserve_end, start 82 arch/um/kernel/physmem.c unsigned long reserve = reserve_end - start; start 111 arch/um/kernel/physmem.c memblock_add(__pa(start), len + highmem); start 112 arch/um/kernel/physmem.c memblock_reserve(__pa(start), reserve); start 337 arch/um/kernel/tlb.c static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) start 350 arch/um/kernel/tlb.c for (addr = start; addr < end;) { start 534 arch/um/kernel/tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 536 arch/um/kernel/tlb.c flush_tlb_kernel_range_common(start, end); start 562 arch/um/kernel/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, start 566 arch/um/kernel/tlb.c flush_tlb_kernel_range_common(start, end); start 567 arch/um/kernel/tlb.c else fix_range(vma->vm_mm, start, end, 0); start 571 arch/um/kernel/tlb.c void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, start 574 arch/um/kernel/tlb.c fix_range(mm, start, end, 0); start 94 arch/um/kernel/um_arch.c .start = c_start, start 362 arch/um/kernel/um_arch.c void apply_alternatives(struct alt_instr *start, struct alt_instr *end) start 129 arch/unicore32/include/asm/cacheflush.h unsigned long start, unsigned long end); start 140 arch/unicore32/include/asm/cacheflush.h #define flush_cache_user_range(vma, start, end) \ start 141 arch/unicore32/include/asm/cacheflush.h __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) start 153 arch/unicore32/include/asm/cacheflush.h #define clean_dcache_area(start, size) cpu_dcache_clean_area(start, size) start 189 arch/unicore32/include/asm/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) start 193 arch/unicore32/include/asm/cacheflush.h static inline void flush_cache_vunmap(unsigned long start, unsigned long end) start 19 arch/unicore32/include/asm/memblock.h unsigned long start; start 34 arch/unicore32/include/asm/memblock.h #define bank_pfn_start(bank) __phys_to_pfn((bank)->start) start 35 arch/unicore32/include/asm/memblock.h #define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) start 37 arch/unicore32/include/asm/memblock.h #define bank_phys_start(bank) ((bank)->start) start 38 arch/unicore32/include/asm/memblock.h #define bank_phys_end(bank) ((bank)->start + (bank)->size) start 88 arch/unicore32/include/asm/mmu_context.h unsigned long start, unsigned long end) start 167 arch/unicore32/include/asm/tlbflush.h #define local_flush_tlb_range(vma, start, end) \ start 168 arch/unicore32/include/asm/tlbflush.h __cpu_flush_user_tlb_range(start, end, vma) start 223 arch/unicore32/kernel/irq.c .start = io_v2p(PKUNITY_INTC_BASE), start 253 arch/unicore32/kernel/pci.c .start = 0, start 330 arch/unicore32/kernel/pci.c resource_size_t start = res->start; start 332 arch/unicore32/kernel/pci.c if (res->flags & IORESOURCE_IO && start & 0x300) start 333 arch/unicore32/kernel/pci.c start = (start + 0x3ff) & ~0x3ff; start 335 arch/unicore32/kernel/pci.c start = (start + align - 1) & ~(align - 1); start 337 arch/unicore32/kernel/pci.c return start; start 358 arch/unicore32/kernel/pci.c if (!r->start && r->end) { start 49 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_USB_BASE), start 
53 arch/unicore32/kernel/puv3-core.c .start = IRQ_USB, start 56 arch/unicore32/kernel/puv3-core.c .start = IRQ_USB, start 81 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_SDC_BASE), start 86 arch/unicore32/kernel/puv3-core.c .start = IRQ_SDC, start 94 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_UNIGFX_BASE), start 102 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_RTC_BASE), start 107 arch/unicore32/kernel/puv3-core.c .start = IRQ_RTCAlarm, start 112 arch/unicore32/kernel/puv3-core.c .start = IRQ_RTC, start 120 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_OST_BASE) + 0x80, start 128 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_UART0_BASE), start 133 arch/unicore32/kernel/puv3-core.c .start = IRQ_UART0, start 141 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_UART1_BASE), start 146 arch/unicore32/kernel/puv3-core.c .start = IRQ_UART1, start 154 arch/unicore32/kernel/puv3-core.c .start = io_v2p(PKUNITY_UMAL_BASE), start 159 arch/unicore32/kernel/puv3-core.c .start = IRQ_UMAL, start 32 arch/unicore32/kernel/puv3-nb0916.c .start = 0xFFF80000, start 39 arch/unicore32/kernel/puv3-nb0916.c .start = io_v2p(PKUNITY_I2C_BASE), start 44 arch/unicore32/kernel/puv3-nb0916.c .start = IRQ_I2C, start 69 arch/unicore32/kernel/setup.c .start = 0, start 75 arch/unicore32/kernel/setup.c .start = 0, start 130 arch/unicore32/kernel/setup.c static int __init uc32_add_memory(unsigned long start, unsigned long size) start 136 arch/unicore32/kernel/setup.c "ignoring memory at %#lx\n", start); start 144 arch/unicore32/kernel/setup.c size -= start & ~PAGE_MASK; start 146 arch/unicore32/kernel/setup.c bank->start = PAGE_ALIGN(start); start 167 arch/unicore32/kernel/setup.c unsigned long size, start; start 180 arch/unicore32/kernel/setup.c start = PHYS_OFFSET; start 183 arch/unicore32/kernel/setup.c start = memparse(endp + 1, NULL); start 185 arch/unicore32/kernel/setup.c uc32_add_memory(start, size); start 197 arch/unicore32/kernel/setup.c kernel_code.start = virt_to_phys(_stext); start 199 arch/unicore32/kernel/setup.c kernel_data.start = virt_to_phys(_sdata); start 212 arch/unicore32/kernel/setup.c res->start = mi->bank[i].start; start 213 arch/unicore32/kernel/setup.c res->end = mi->bank[i].start + mi->bank[i].size - 1; start 218 arch/unicore32/kernel/setup.c if (kernel_code.start >= res->start && start 221 arch/unicore32/kernel/setup.c if (kernel_data.start >= res->start && start 350 arch/unicore32/kernel/setup.c .start = c_start, start 20 arch/unicore32/mm/flush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start, start 48 arch/unicore32/mm/init.c unsigned long start, end; start 50 arch/unicore32/mm/init.c start = bank_pfn_start(bank); start 53 arch/unicore32/mm/init.c if (*min > start) start 54 arch/unicore32/mm/init.c *min = start; start 88 arch/unicore32/mm/init.c unsigned long start = memblock_region_memory_base_pfn(reg); start 91 arch/unicore32/mm/init.c if (start < max_low) { start 93 arch/unicore32/mm/init.c zhole_size[0] -= low_end - start; start 131 arch/unicore32/mm/init.c memblock_add(mi->bank[i].start, mi->bank[i].size); start 398 arch/unicore32/mm/mmu.c phys_addr_t start = reg->base; start 399 arch/unicore32/mm/mmu.c phys_addr_t end = start + reg->size; start 404 arch/unicore32/mm/mmu.c if (start >= end) start 407 arch/unicore32/mm/mmu.c map.pfn = __phys_to_pfn(start); start 408 arch/unicore32/mm/mmu.c map.virtual = __phys_to_virt(start); start 409 arch/unicore32/mm/mmu.c map.length = end - start; start 187 
arch/x86/boot/compressed/acpi.c static u8 *scan_mem_for_rsdp(u8 *start, u32 length) start 192 arch/x86/boot/compressed/acpi.c end = start + length; start 195 arch/x86/boot/compressed/acpi.c for (address = start; address < end; address += ACPI_RSDP_SCAN_STEP) { start 408 arch/x86/boot/compressed/acpi.c immovable_mem[num].start = ma->base_address; start 118 arch/x86/boot/compressed/kaslr.c if (one->start + one->size <= two->start) start 121 arch/x86/boot/compressed/kaslr.c if (one->start >= two->start + two->size) start 136 arch/x86/boot/compressed/kaslr.c parse_memmap(char *p, unsigned long long *start, unsigned long long *size) start 156 arch/x86/boot/compressed/kaslr.c *start = memparse(p + 1, &p); start 168 arch/x86/boot/compressed/kaslr.c *start = 0; start 184 arch/x86/boot/compressed/kaslr.c unsigned long long start, size; start 190 arch/x86/boot/compressed/kaslr.c rc = parse_memmap(str, &start, &size); start 195 arch/x86/boot/compressed/kaslr.c if (start == 0) { start 203 arch/x86/boot/compressed/kaslr.c mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start; start 381 arch/x86/boot/compressed/kaslr.c mem_avoid[MEM_AVOID_ZO_RANGE].start = input; start 383 arch/x86/boot/compressed/kaslr.c add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start, start 391 arch/x86/boot/compressed/kaslr.c mem_avoid[MEM_AVOID_INITRD].start = initrd_start; start 402 arch/x86/boot/compressed/kaslr.c mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line; start 404 arch/x86/boot/compressed/kaslr.c add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start, start 408 arch/x86/boot/compressed/kaslr.c mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params; start 410 arch/x86/boot/compressed/kaslr.c add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start, start 436 arch/x86/boot/compressed/kaslr.c unsigned long earliest = img->start + img->size; start 441 arch/x86/boot/compressed/kaslr.c mem_avoid[i].start < earliest) { start 443 arch/x86/boot/compressed/kaslr.c earliest = overlap->start; start 453 arch/x86/boot/compressed/kaslr.c avoid.start = (unsigned long)ptr; start 456 arch/x86/boot/compressed/kaslr.c if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) { start 458 arch/x86/boot/compressed/kaslr.c earliest = overlap->start; start 488 arch/x86/boot/compressed/kaslr.c slot_area.addr = region->start; start 514 arch/x86/boot/compressed/kaslr.c addr = ALIGN(region->start, PUD_SIZE); start 516 arch/x86/boot/compressed/kaslr.c if (addr < region->start + region->size) start 517 arch/x86/boot/compressed/kaslr.c size = region->size - (addr - region->start); start 538 arch/x86/boot/compressed/kaslr.c if (addr >= region->start + image_size) { start 539 arch/x86/boot/compressed/kaslr.c tmp.start = region->start; start 540 arch/x86/boot/compressed/kaslr.c tmp.size = addr - region->start; start 544 arch/x86/boot/compressed/kaslr.c size = region->size - (addr - region->start) - i * PUD_SIZE; start 546 arch/x86/boot/compressed/kaslr.c tmp.start = addr + i * PUD_SIZE; start 585 arch/x86/boot/compressed/kaslr.c if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE) start 589 arch/x86/boot/compressed/kaslr.c if (entry->start + entry->size < minimum) start 593 arch/x86/boot/compressed/kaslr.c end = min(entry->size + entry->start, mem_limit); start 594 arch/x86/boot/compressed/kaslr.c if (entry->start >= end) start 596 arch/x86/boot/compressed/kaslr.c cur_entry.start = entry->start; start 597 arch/x86/boot/compressed/kaslr.c cur_entry.size = end - entry->start; start 599 arch/x86/boot/compressed/kaslr.c region.start = 
cur_entry.start; start 604 arch/x86/boot/compressed/kaslr.c start_orig = region.start; start 607 arch/x86/boot/compressed/kaslr.c if (region.start < minimum) start 608 arch/x86/boot/compressed/kaslr.c region.start = minimum; start 611 arch/x86/boot/compressed/kaslr.c region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); start 614 arch/x86/boot/compressed/kaslr.c if (region.start > cur_entry.start + cur_entry.size) start 618 arch/x86/boot/compressed/kaslr.c region.size -= region.start - start_orig; start 622 arch/x86/boot/compressed/kaslr.c region.start + region.size > KERNEL_IMAGE_SIZE) start 623 arch/x86/boot/compressed/kaslr.c region.size = KERNEL_IMAGE_SIZE - region.start; start 636 arch/x86/boot/compressed/kaslr.c if (overlap.start > region.start + image_size) { start 639 arch/x86/boot/compressed/kaslr.c beginning.start = region.start; start 640 arch/x86/boot/compressed/kaslr.c beginning.size = overlap.start - region.start; start 645 arch/x86/boot/compressed/kaslr.c if (overlap.start + overlap.size >= region.start + region.size) start 649 arch/x86/boot/compressed/kaslr.c region.size -= overlap.start - region.start + overlap.size; start 650 arch/x86/boot/compressed/kaslr.c region.start = overlap.start + overlap.size; start 679 arch/x86/boot/compressed/kaslr.c unsigned long long start, end, entry_end, region_end; start 685 arch/x86/boot/compressed/kaslr.c start = immovable_mem[i].start; start 686 arch/x86/boot/compressed/kaslr.c end = start + immovable_mem[i].size; start 687 arch/x86/boot/compressed/kaslr.c region_end = region->start + region->size; start 689 arch/x86/boot/compressed/kaslr.c entry.start = clamp(region->start, start, end); start 690 arch/x86/boot/compressed/kaslr.c entry_end = clamp(region_end, start, end); start 691 arch/x86/boot/compressed/kaslr.c entry.size = entry_end - entry.start; start 767 arch/x86/boot/compressed/kaslr.c region.start = md->phys_addr; start 795 arch/x86/boot/compressed/kaslr.c region.start = entry->addr; start 130 arch/x86/boot/compressed/kaslr_64.c void add_identity_map(unsigned long start, unsigned long size) start 132 arch/x86/boot/compressed/kaslr_64.c unsigned long end = start + size; start 135 arch/x86/boot/compressed/kaslr_64.c start = round_down(start, PMD_SIZE); start 137 arch/x86/boot/compressed/kaslr_64.c if (start >= end) start 142 arch/x86/boot/compressed/kaslr_64.c start, end); start 73 arch/x86/boot/compressed/misc.h unsigned long long start; start 98 arch/x86/boot/compressed/misc.h void add_identity_map(unsigned long start, unsigned long size); start 104 arch/x86/boot/compressed/misc.h static inline void add_identity_map(unsigned long start, unsigned long size) start 212 arch/x86/entry/vdso/vma.c static unsigned long vdso_addr(unsigned long start, unsigned len) start 221 arch/x86/entry/vdso/vma.c start = PAGE_ALIGN(start); start 224 arch/x86/entry/vdso/vma.c end = (start + len + PMD_SIZE - 1) & PMD_MASK; start 229 arch/x86/entry/vdso/vma.c if (end > start) { start 230 arch/x86/entry/vdso/vma.c offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); start 231 arch/x86/entry/vdso/vma.c addr = start + (offset << PAGE_SHIFT); start 233 arch/x86/entry/vdso/vma.c addr = start; start 526 arch/x86/events/amd/ibs.c .start = perf_ibs_start, start 551 arch/x86/events/amd/ibs.c .start = perf_ibs_start, start 404 arch/x86/events/amd/iommu.c .start = perf_iommu_start, start 216 arch/x86/events/amd/power.c .start = pmu_event_start, start 303 arch/x86/events/amd/uncore.c .start = amd_uncore_start, start 314 
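The kaslr.c entries a little above (the mem_avoid[] bookkeeping, mem_overlaps(), and struct mem_vector from misc.h) all revolve around one interval test: two [start, start + size) ranges collide unless one ends at or before the other begins. A hedged, standalone sketch of that check, using the same field names as the listed misc.h definition; the helper name ranges_overlap is mine, the in-tree helper with this logic is mem_overlaps():

/*
 * Standalone sketch of the overlap test used by the KASLR mem_avoid
 * logic listed above; ranges_overlap() is a hypothetical name.
 */
#include <stdbool.h>

struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

static bool ranges_overlap(const struct mem_vector *one,
			   const struct mem_vector *two)
{
	/* one ends at or before two begins: no overlap */
	if (one->start + one->size <= two->start)
		return false;
	/* one begins at or after two ends: no overlap */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}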
arch/x86/events/amd/uncore.c .start = amd_uncore_start, start 2300 arch/x86/events/core.c .start = x86_pmu_start, start 609 arch/x86/events/intel/bts.c bts_pmu.start = bts_event_start; start 4226 arch/x86/events/intel/core.c EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1"); start 4231 arch/x86/events/intel/core.c EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1"); start 482 arch/x86/events/intel/cstate.c .start = cstate_pmu_event_start, start 497 arch/x86/events/intel/cstate.c .start = cstate_pmu_event_start, start 287 arch/x86/events/intel/ds.c unsigned long start = (unsigned long)cea; start 301 arch/x86/events/intel/ds.c flush_tlb_kernel_range(start, start + size); start 307 arch/x86/events/intel/ds.c unsigned long start = (unsigned long)cea; start 314 arch/x86/events/intel/ds.c flush_tlb_kernel_range(start, start + size); start 1311 arch/x86/events/intel/pt.c if (filter->path.dentry && !fr[range].start) { start 1315 arch/x86/events/intel/pt.c msr_a = fr[range].start; start 1616 arch/x86/events/intel/pt.c pt_pmu.pmu.start = pt_event_start; start 663 arch/x86/events/intel/rapl.c rapl_pmus->pmu.start = rapl_pmu_event_start; start 841 arch/x86/events/intel/uncore.c .start = uncore_pmu_event_start, start 562 arch/x86/events/intel/uncore_snb.c .start = uncore_pmu_event_start, start 284 arch/x86/events/msr.c .start = msr_event_start, start 27 arch/x86/hyperv/mmu.c unsigned long start, unsigned long end) start 30 arch/x86/hyperv/mmu.c unsigned long cur = start, diff; start 141 arch/x86/hyperv/mmu.c ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { start 146 arch/x86/hyperv/mmu.c info->start, info->end); start 215 arch/x86/hyperv/mmu.c ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { start 221 arch/x86/hyperv/mmu.c info->start, info->end); start 49 arch/x86/ia32/ia32_aout.c static int set_brk(unsigned long start, unsigned long end) start 51 arch/x86/ia32/ia32_aout.c start = PAGE_ALIGN(start); start 53 arch/x86/ia32/ia32_aout.c if (end <= start) start 55 arch/x86/ia32/ia32_aout.c return vm_brk(start, end - start); start 74 arch/x86/include/asm/alternative.h extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); start 84 arch/x86/include/asm/alternative.h extern int alternatives_text_reserved(void *start, void *end); start 92 arch/x86/include/asm/alternative.h static inline int alternatives_text_reserved(void *start, void *end) start 26 arch/x86/include/asm/bootparam_utils.h .start = offsetof(struct boot_params, struct_member), \ start 31 arch/x86/include/asm/bootparam_utils.h unsigned int start; start 82 arch/x86/include/asm/bootparam_utils.h memcpy(save_base + to_save[i].start, start 83 arch/x86/include/asm/bootparam_utils.h bp_base + to_save[i].start, to_save[i].len); start 13 arch/x86/include/asm/e820/api.h extern bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type); start 14 arch/x86/include/asm/e820/api.h extern bool e820__mapped_any(u64 start, u64 end, enum e820_type type); start 15 arch/x86/include/asm/e820/api.h extern bool e820__mapped_all(u64 start, u64 end, enum e820_type type); start 17 arch/x86/include/asm/e820/api.h extern void e820__range_add (u64 start, u64 size, enum e820_type type); start 18 arch/x86/include/asm/e820/api.h extern u64 e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type); start 19 arch/x86/include/asm/e820/api.h extern u64 e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type); start 44 arch/x86/include/asm/e820/api.h extern 
int e820__get_entry_type(u64 start, u64 end); start 50 arch/x86/include/asm/e820/api.h static inline bool is_ISA_range(u64 start, u64 end) start 52 arch/x86/include/asm/e820/api.h return start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS; start 394 arch/x86/include/asm/io.h extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size); start 395 arch/x86/include/asm/io.h extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size); start 96 arch/x86/include/asm/iommu_table.h void sort_iommu_table(struct iommu_table_entry *start, start 99 arch/x86/include/asm/iommu_table.h void check_iommu_entries(struct iommu_table_entry *start, start 1556 arch/x86/include/asm/kvm_host.h int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); start 1557 arch/x86/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); start 281 arch/x86/include/asm/mmu_context.h static inline void arch_unmap(struct mm_struct *mm, unsigned long start, start 302 arch/x86/include/asm/mmu_context.h mpx_notify_unmap(mm, start, end); start 85 arch/x86/include/asm/mpx.h extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end); start 105 arch/x86/include/asm/mpx.h unsigned long start, unsigned long end) start 34 arch/x86/include/asm/numa.h extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); start 76 arch/x86/include/asm/page_types.h extern unsigned long init_memory_mapping(unsigned long start, start 370 arch/x86/include/asm/paravirt_types.h unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end); start 13 arch/x86/include/asm/pat.h extern int reserve_memtype(u64 start, u64 end, start 15 arch/x86/include/asm/pat.h extern int free_memtype(u64 start, u64 end); start 20 arch/x86/include/asm/pat.h int io_reserve_memtype(resource_size_t start, resource_size_t end, start 23 arch/x86/include/asm/pat.h void io_free_memtype(resource_size_t start, resource_size_t end); start 157 arch/x86/include/asm/pci_x86.h extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, start 159 arch/x86/include/asm/pci_x86.h extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end); start 161 arch/x86/include/asm/pci_x86.h extern struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, start 22 arch/x86/include/asm/pgalloc.h unsigned long start, unsigned long count) {} start 165 arch/x86/include/asm/pgtable_64.h extern void sync_global_pgds(unsigned long start, unsigned long end); start 259 arch/x86/include/asm/pgtable_64.h static inline bool gup_fast_permitted(unsigned long start, unsigned long end) start 42 arch/x86/include/asm/realmode.h u32 start; start 47 arch/x86/include/asm/realmode.h u64 start; start 11 arch/x86/include/asm/text-patching.h void apply_paravirt(struct paravirt_patch_site *start, start 14 arch/x86/include/asm/text-patching.h static inline void apply_paravirt(struct paravirt_patch_site *start, start 16 arch/x86/include/asm/tlb.h unsigned long start = 0UL, end = TLB_FLUSH_ALL; start 20 arch/x86/include/asm/tlb.h start = tlb->start; start 24 arch/x86/include/asm/tlb.h flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables); start 559 arch/x86/include/asm/tlbflush.h unsigned long start; start 571 arch/x86/include/asm/tlbflush.h #define flush_tlb_range(vma, start, end) \ start 572 arch/x86/include/asm/tlbflush.h flush_tlb_mm_range((vma)->vm_mm, start, end, \ start 578 
arch/x86/include/asm/tlbflush.h extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, start 581 arch/x86/include/asm/tlbflush.h extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 23 arch/x86/include/asm/trace/hyperv.h __entry->addr = info->start; start 69 arch/x86/include/asm/trace/mpx.h TP_PROTO(unsigned long start, start 71 arch/x86/include/asm/trace/mpx.h TP_ARGS(start, end), start 74 arch/x86/include/asm/trace/mpx.h __field(unsigned long, start) start 79 arch/x86/include/asm/trace/mpx.h __entry->start = start; start 84 arch/x86/include/asm/trace/mpx.h (void *)__entry->start, start 90 arch/x86/include/asm/trace/mpx.h TP_PROTO(unsigned long start, unsigned long end), start 91 arch/x86/include/asm/trace/mpx.h TP_ARGS(start, end) start 95 arch/x86/include/asm/trace/mpx.h TP_PROTO(unsigned long start, unsigned long end), start 96 arch/x86/include/asm/trace/mpx.h TP_ARGS(start, end) start 22 arch/x86/include/asm/vga.h unsigned long start = (unsigned long)phys_to_virt(x); \ start 25 arch/x86/include/asm/vga.h set_memory_decrypted(start, (s) >> PAGE_SHIFT); \ start 27 arch/x86/include/asm/vga.h start; \ start 274 arch/x86/include/asm/x86_init.h bool (*is_untracked_pat_range)(u64 start, u64 end); start 930 arch/x86/kernel/acpi/boot.c hpet_res->start = hpet_address; start 369 arch/x86/kernel/alternative.c void __init_or_module noinline apply_alternatives(struct alt_instr *start, start 376 arch/x86/kernel/alternative.c DPRINTK("alt table %px, -> %px", start, end); start 386 arch/x86/kernel/alternative.c for (a = start; a < end; a++) { start 440 arch/x86/kernel/alternative.c static void alternatives_smp_lock(const s32 *start, const s32 *end, start 445 arch/x86/kernel/alternative.c for (poff = start; poff < end; poff++) { start 456 arch/x86/kernel/alternative.c static void alternatives_smp_unlock(const s32 *start, const s32 *end, start 461 arch/x86/kernel/alternative.c for (poff = start; poff < end; poff++) { start 568 arch/x86/kernel/alternative.c int alternatives_text_reserved(void *start, void *end) start 572 arch/x86/kernel/alternative.c u8 *text_start = start; start 593 arch/x86/kernel/alternative.c void __init_or_module apply_paravirt(struct paravirt_patch_site *start, start 599 arch/x86/kernel/alternative.c for (p = start; p < end; p++) { start 325 arch/x86/kernel/amd_gart_64.c static int __dma_map_cont(struct device *dev, struct scatterlist *start, start 337 arch/x86/kernel/amd_gart_64.c for_each_sg(start, s, nelems, i) { start 341 arch/x86/kernel/amd_gart_64.c BUG_ON(s != start && s->offset); start 342 arch/x86/kernel/amd_gart_64.c if (s == start) { start 364 arch/x86/kernel/amd_gart_64.c dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, start 369 arch/x86/kernel/amd_gart_64.c sout->dma_address = start->dma_address; start 370 arch/x86/kernel/amd_gart_64.c sout->dma_length = start->length; start 373 arch/x86/kernel/amd_gart_64.c return __dma_map_cont(dev, start, nelems, sout, pages); start 384 arch/x86/kernel/amd_gart_64.c int need = 0, nextneed, i, out, start; start 393 arch/x86/kernel/amd_gart_64.c start = 0; start 409 arch/x86/kernel/amd_gart_64.c if (i > start) { start 418 arch/x86/kernel/amd_gart_64.c if (dma_map_cont(dev, start_sg, i - start, start 426 arch/x86/kernel/amd_gart_64.c start = i; start 436 arch/x86/kernel/amd_gart_64.c if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) start 374 arch/x86/kernel/amd_nb.c res->start = base; start 245 arch/x86/kernel/apb_timer.c u64 start, now; start 
253 arch/x86/kernel/apb_timer.c start = rdtsc(); start 264 arch/x86/kernel/apb_timer.c } while ((now - start) < 200000UL); start 2878 arch/x86/kernel/apic/apic.c lapic_resource.start = apic_phys; start 2879 arch/x86/kernel/apic/apic.c lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; start 1588 arch/x86/kernel/apic/io_apic.c unsigned long long start, now; start 1591 arch/x86/kernel/apic/io_apic.c start = rdtsc(); start 1602 arch/x86/kernel/apic/io_apic.c } while ((now - start) < 40000000000ULL / HZ && start 2700 arch/x86/kernel/apic/io_apic.c ioapic_res->start = ioapic_phys; start 79 arch/x86/kernel/apic/x2apic_uv_x.c static inline bool is_GRU_range(u64 start, u64 end) start 82 arch/x86/kernel/apic/x2apic_uv_x.c u64 su = start & gru_dist_umask; /* Upper (incl pnode) bits */ start 83 arch/x86/kernel/apic/x2apic_uv_x.c u64 sl = start & gru_dist_lmask; /* Base offset bits */ start 93 arch/x86/kernel/apic/x2apic_uv_x.c return start >= gru_start_paddr && end <= gru_end_paddr; start 97 arch/x86/kernel/apic/x2apic_uv_x.c static bool uv_is_untracked_pat_range(u64 start, u64 end) start 99 arch/x86/kernel/apic/x2apic_uv_x.c return is_ISA_range(start, end) || is_GRU_range(start, end); start 507 arch/x86/kernel/apic/x2apic_uv_x.c unsigned long start, end; start 510 arch/x86/kernel/apic/x2apic_uv_x.c start = gb < 0 ? 0 : (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT; start 513 arch/x86/kernel/apic/x2apic_uv_x.c pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n", i, grt->nasid, start, end, gb); start 94 arch/x86/kernel/check.c phys_addr_t start, end; start 115 arch/x86/kernel/check.c for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, start 117 arch/x86/kernel/check.c start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE), start 121 arch/x86/kernel/check.c if (start >= end) start 124 arch/x86/kernel/check.c memblock_reserve(start, end - start); start 125 arch/x86/kernel/check.c scan_areas[num_scan_areas].addr = start; start 126 arch/x86/kernel/check.c scan_areas[num_scan_areas].size = end - start; start 129 arch/x86/kernel/check.c memset(__va(start), 0, end - start); start 232 arch/x86/kernel/cpu/mce/inject.c unsigned long start; start 258 arch/x86/kernel/cpu/mce/inject.c start = jiffies; start 260 arch/x86/kernel/cpu/mce/inject.c if (!time_before(jiffies, start + 2*HZ)) { start 372 arch/x86/kernel/cpu/mce/severity.c .start = s_start, start 260 arch/x86/kernel/cpu/microcode/core.c unsigned long start = 0; start 278 arch/x86/kernel/cpu/microcode/core.c start = params->hdr.ramdisk_image; start 285 arch/x86/kernel/cpu/microcode/core.c start = (unsigned long)boot_params.ext_ramdisk_image << 32; start 286 arch/x86/kernel/cpu/microcode/core.c start |= boot_params.hdr.ramdisk_image; start 288 arch/x86/kernel/cpu/microcode/core.c start += PAGE_OFFSET; start 305 arch/x86/kernel/cpu/microcode/core.c start = initrd_start; start 316 arch/x86/kernel/cpu/microcode/core.c start = *rr; start 319 arch/x86/kernel/cpu/microcode/core.c return find_cpio_data(path, (void *)start, size, NULL); start 86 arch/x86/kernel/cpu/mtrr/cleanup.c range[i].start, range[i].end); start 121 arch/x86/kernel/cpu/mtrr/cleanup.c range[i].start, range[i].end); start 131 arch/x86/kernel/cpu/mtrr/cleanup.c range[i].start, range[i].end); start 145 arch/x86/kernel/cpu/mtrr/cleanup.c sum += range[i].end - range[i].start; start 490 arch/x86/kernel/cpu/mtrr/cleanup.c set_var_mtrr_range(&var_state, range[i].start, start 491 arch/x86/kernel/cpu/mtrr/cleanup.c range[i].end - range[i].start); start 947 
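Several entries just above (apb_timer.c and io_apic.c, and later hpet.c and tsc_sync.c) use the same idiom: snapshot the TSC with rdtsc(), poll, and give up once a cycle budget has elapsed. A hedged sketch of that pattern, with a hypothetical condition callback standing in for the hardware-specific check:

/*
 * Hedged sketch of the rdtsc()-based polling loop seen in the entries
 * above.  done() is a hypothetical predicate for whatever hardware
 * condition is being awaited; cycles is the TSC-tick budget.
 */
#include <linux/types.h>
#include <asm/msr.h>		/* rdtsc() */

static bool poll_with_tsc_budget(bool (*done)(void), u64 cycles)
{
	u64 start, now;

	start = rdtsc();
	do {
		if (done())
			return true;
		now = rdtsc();
	} while ((now - start) < cycles);

	return false;		/* budget exhausted */
}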
arch/x86/kernel/cpu/mtrr/cleanup.c range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); start 957 arch/x86/kernel/cpu/mtrr/cleanup.c if (range[0].start) start 958 arch/x86/kernel/cpu/mtrr/cleanup.c total_trim_size += real_trim_memory(0, range[0].start); start 962 arch/x86/kernel/cpu/mtrr/cleanup.c if (range[i].end < range[i+1].start) start 964 arch/x86/kernel/cpu/mtrr/cleanup.c range[i+1].start); start 120 arch/x86/kernel/cpu/mtrr/generic.c static u8 mtrr_type_lookup_fixed(u64 start, u64 end) start 124 arch/x86/kernel/cpu/mtrr/generic.c if (start >= 0x100000) start 128 arch/x86/kernel/cpu/mtrr/generic.c if (start < 0x80000) { start 130 arch/x86/kernel/cpu/mtrr/generic.c idx += (start >> 16); start 133 arch/x86/kernel/cpu/mtrr/generic.c } else if (start < 0xC0000) { start 135 arch/x86/kernel/cpu/mtrr/generic.c idx += ((start - 0x80000) >> 14); start 141 arch/x86/kernel/cpu/mtrr/generic.c idx += ((start - 0xC0000) >> 12); start 160 arch/x86/kernel/cpu/mtrr/generic.c static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end, start 185 arch/x86/kernel/cpu/mtrr/generic.c start_state = ((start & mask) == (base & mask)); start 187 arch/x86/kernel/cpu/mtrr/generic.c inclusive = ((start < base) && (end > base)); start 216 arch/x86/kernel/cpu/mtrr/generic.c if (unlikely(*partial_end <= start)) { start 218 arch/x86/kernel/cpu/mtrr/generic.c *partial_end = start + PAGE_SIZE; start 226 arch/x86/kernel/cpu/mtrr/generic.c if ((start & mask) != (base & mask)) start 258 arch/x86/kernel/cpu/mtrr/generic.c u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) start 274 arch/x86/kernel/cpu/mtrr/generic.c if ((start < 0x100000) && start 278 arch/x86/kernel/cpu/mtrr/generic.c type = mtrr_type_lookup_fixed(start, end); start 286 arch/x86/kernel/cpu/mtrr/generic.c type = mtrr_type_lookup_variable(start, end, &partial_end, start 297 arch/x86/kernel/cpu/mtrr/generic.c start = partial_end; start 299 arch/x86/kernel/cpu/mtrr/generic.c type = mtrr_type_lookup_variable(start, end, &partial_end, start 306 arch/x86/kernel/cpu/mtrr/generic.c if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2)) start 163 arch/x86/kernel/cpu/proc.c .start = c_start, start 871 arch/x86/kernel/cpu/resctrl/pseudo_lock.c u64 start, end; start 884 arch/x86/kernel/cpu/resctrl/pseudo_lock.c start = rdtsc_ordered(); start 886 arch/x86/kernel/cpu/resctrl/pseudo_lock.c start = rdtsc_ordered(); start 892 arch/x86/kernel/cpu/resctrl/pseudo_lock.c trace_pseudo_lock_mem_latency((u32)(end - start)); start 221 arch/x86/kernel/crash.c ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); start 226 arch/x86/kernel/crash.c ret = crash_exclude_mem_range(cmem, crashk_low_res.start, start 237 arch/x86/kernel/crash.c cmem->ranges[cmem->nr_ranges].start = res->start; start 311 arch/x86/kernel/crash.c ei.addr = res->start; start 323 arch/x86/kernel/crash.c unsigned long start, end; start 326 arch/x86/kernel/crash.c cmem->ranges[0].start = mstart; start 331 arch/x86/kernel/crash.c start = image->arch.backup_load_addr; start 332 arch/x86/kernel/crash.c end = start + image->arch.backup_src_sz - 1; start 333 arch/x86/kernel/crash.c ret = crash_exclude_mem_range(cmem, start, end); start 338 arch/x86/kernel/crash.c start = image->arch.elf_load_addr; start 339 arch/x86/kernel/crash.c end = start + image->arch.elf_headers_sz - 1; start 340 arch/x86/kernel/crash.c return crash_exclude_mem_range(cmem, start, end); start 384 arch/x86/kernel/crash.c ei.addr = crashk_low_res.start; start 385 arch/x86/kernel/crash.c ei.size = 
crashk_low_res.end - crashk_low_res.start + 1; start 391 arch/x86/kernel/crash.c ret = memmap_exclude_ranges(image, cmem, crashk_res.start, start 397 arch/x86/kernel/crash.c ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1; start 402 arch/x86/kernel/crash.c ei.addr = cmem->ranges[i].start; start 416 arch/x86/kernel/crash.c image->arch.backup_src_start = res->start; start 131 arch/x86/kernel/devicetree.c hpet_address = r.start; start 166 arch/x86/kernel/devicetree.c lapic_addr = r.start; start 259 arch/x86/kernel/devicetree.c mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg); start 78 arch/x86/kernel/e820.c u64 start, u64 end, enum e820_type type) start 87 arch/x86/kernel/e820.c if (entry->addr >= end || entry->addr + entry->size <= start) start 94 arch/x86/kernel/e820.c bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type) start 96 arch/x86/kernel/e820.c return _e820__mapped_any(e820_table_firmware, start, end, type); start 100 arch/x86/kernel/e820.c bool e820__mapped_any(u64 start, u64 end, enum e820_type type) start 102 arch/x86/kernel/e820.c return _e820__mapped_any(e820_table, start, end, type); start 112 arch/x86/kernel/e820.c static struct e820_entry *__e820__mapped_all(u64 start, u64 end, start 124 arch/x86/kernel/e820.c if (entry->addr >= end || entry->addr + entry->size <= start) start 131 arch/x86/kernel/e820.c if (entry->addr <= start) start 132 arch/x86/kernel/e820.c start = entry->addr + entry->size; start 138 arch/x86/kernel/e820.c if (start >= end) start 148 arch/x86/kernel/e820.c bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type) start 150 arch/x86/kernel/e820.c return __e820__mapped_all(start, end, type); start 156 arch/x86/kernel/e820.c int e820__get_entry_type(u64 start, u64 end) start 158 arch/x86/kernel/e820.c struct e820_entry *entry = __e820__mapped_all(start, end, 0); start 166 arch/x86/kernel/e820.c static void __init __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type) start 172 arch/x86/kernel/e820.c start, start + size - 1); start 176 arch/x86/kernel/e820.c table->entries[x].addr = start; start 182 arch/x86/kernel/e820.c void __init e820__range_add(u64 start, u64 size, enum e820_type type) start 184 arch/x86/kernel/e820.c __e820__range_add(e820_table, start, size, type); start 412 arch/x86/kernel/e820.c u64 start = entry->addr; start 414 arch/x86/kernel/e820.c u64 end = start + size - 1; start 418 arch/x86/kernel/e820.c if (start > end && likely(size)) start 421 arch/x86/kernel/e820.c e820__range_add(start, size, type); start 448 arch/x86/kernel/e820.c __e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) start 456 arch/x86/kernel/e820.c if (size > (ULLONG_MAX - start)) start 457 arch/x86/kernel/e820.c size = ULLONG_MAX - start; start 459 arch/x86/kernel/e820.c end = start + size; start 460 arch/x86/kernel/e820.c printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ", start, end - 1); start 477 arch/x86/kernel/e820.c if (entry->addr >= start && entry_end <= end) { start 484 arch/x86/kernel/e820.c if (entry->addr < start && entry_end > end) { start 485 arch/x86/kernel/e820.c __e820__range_add(table, start, size, new_type); start 487 arch/x86/kernel/e820.c entry->size = start - entry->addr; start 493 arch/x86/kernel/e820.c final_start = max(start, entry->addr); start 515 arch/x86/kernel/e820.c u64 __init e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) start 517 
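The e820/api.h declarations and e820.c bodies listed above form a small interface: e820__range_add(), e820__range_update() and e820__range_remove() edit the firmware memory map by (start, size), while e820__mapped_any()/e820__mapped_all() query a [start, end) span. A hedged usage sketch with an invented address window (the function name and constants below are illustrative, not from the listed files):

/*
 * Hedged sketch: retype a hypothetical firmware window during early
 * boot using the e820 helpers declared in asm/e820/api.h above.  The
 * base/size values are invented for illustration.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/e820/api.h>
#include <asm/e820/types.h>

static void __init example_reserve_firmware_window(void)
{
	u64 start = 0xfed40000;		/* hypothetical base */
	u64 size  = 0x5000;		/* hypothetical size */

	/* Only retype the window if any of it is currently usable RAM. */
	if (e820__mapped_any(start, start + size, E820_TYPE_RAM))
		e820__range_update(start, size,
				   E820_TYPE_RAM, E820_TYPE_RESERVED);
}

Per the extern u64 declaration above, e820__range_update() returns the number of bytes actually retyped; the sketch simply ignores it.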
arch/x86/kernel/e820.c return __e820__range_update(e820_table, start, size, old_type, new_type); start 520 arch/x86/kernel/e820.c static u64 __init e820__range_update_kexec(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) start 522 arch/x86/kernel/e820.c return __e820__range_update(e820_table_kexec, start, size, old_type, new_type); start 526 arch/x86/kernel/e820.c u64 __init e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type) start 532 arch/x86/kernel/e820.c if (size > (ULLONG_MAX - start)) start 533 arch/x86/kernel/e820.c size = ULLONG_MAX - start; start 535 arch/x86/kernel/e820.c end = start + size; start 536 arch/x86/kernel/e820.c printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ", start, end - 1); start 552 arch/x86/kernel/e820.c if (entry->addr >= start && entry_end <= end) { start 559 arch/x86/kernel/e820.c if (entry->addr < start && entry_end > end) { start 561 arch/x86/kernel/e820.c entry->size = start - entry->addr; start 567 arch/x86/kernel/e820.c final_start = max(start, entry->addr); start 613 arch/x86/kernel/e820.c unsigned long long start = e820_table->entries[i].addr; start 614 arch/x86/kernel/e820.c unsigned long long end = start + e820_table->entries[i].size; start 629 arch/x86/kernel/e820.c if (start < last) start 630 arch/x86/kernel/e820.c last = start; start 1077 arch/x86/kernel/e820.c if (res->start < (1ULL<<20)) start 1126 arch/x86/kernel/e820.c res->start = entry->addr; start 1191 arch/x86/kernel/e820.c u64 start, end; start 1196 arch/x86/kernel/e820.c start = entry->addr + entry->size; start 1197 arch/x86/kernel/e820.c end = round_up(start, ram_alignment(start)) - 1; start 1200 arch/x86/kernel/e820.c if (start >= end) start 1203 arch/x86/kernel/e820.c printk(KERN_DEBUG "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n", start, end); start 1204 arch/x86/kernel/e820.c reserve_region_with_split(&iomem_resource, start, end, "RAM buffer"); start 573 arch/x86/kernel/early-quirks.c intel_graphics_stolen_res.start = base; start 91 arch/x86/kernel/ftrace.c within(unsigned long addr, unsigned long start, unsigned long end) start 93 arch/x86/kernel/ftrace.c return addr >= start && addr < end; start 786 arch/x86/kernel/hpet.c u64 start, now, t1; start 791 arch/x86/kernel/hpet.c start = rdtsc(); start 803 arch/x86/kernel/hpet.c } while ((now - start) < 200000UL); start 206 arch/x86/kernel/kexec-bzimage64.c unsigned long long mem_k, start, end; start 242 arch/x86/kernel/kexec-bzimage64.c start = params->e820_table[i].addr; start 245 arch/x86/kernel/kexec-bzimage64.c if ((start <= 0x100000) && end > 0x100000) { start 206 arch/x86/kernel/kprobes/opt.c static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) start 229 arch/x86/kernel/kprobes/opt.c return (start <= target && target <= start + len); start 311 arch/x86/kernel/ldt.c unsigned long start = LDT_BASE_ADDR; start 317 arch/x86/kernel/ldt.c tlb_gather_mmu(&tlb, mm, start, end); start 318 arch/x86/kernel/ldt.c free_pgd_range(&tlb, start, end, start, end); start 319 arch/x86/kernel/ldt.c tlb_finish_mmu(&tlb, start, end); start 240 arch/x86/kernel/machine_kexec_32.c image->start = relocate_kernel_ptr((unsigned long)image->head, start 242 arch/x86/kernel/machine_kexec_32.c image->start, start 46 arch/x86/kernel/machine_kexec_64.c mstart = res->start; start 219 arch/x86/kernel/machine_kexec_64.c mstart = pfn_mapped[i].start << PAGE_SHIFT; start 434 arch/x86/kernel/machine_kexec_64.c image->start = relocate_kernel((unsigned long)image->head, start 
436 arch/x86/kernel/machine_kexec_64.c image->start, start 613 arch/x86/kernel/machine_kexec_64.c kexec_mark_range(unsigned long start, unsigned long end, bool protect) start 622 arch/x86/kernel/machine_kexec_64.c if (!end || start > end) start 625 arch/x86/kernel/machine_kexec_64.c page = pfn_to_page(start >> PAGE_SHIFT); start 626 arch/x86/kernel/machine_kexec_64.c nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; start 637 arch/x86/kernel/machine_kexec_64.c kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect); start 642 arch/x86/kernel/machine_kexec_64.c kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect); start 41 arch/x86/kernel/mmconf-fam10h_64.c start1 = r1->start >> 32; start 42 arch/x86/kernel/mmconf-fam10h_64.c start2 = r2->start >> 32; start 121 arch/x86/kernel/mmconf-fam10h_64.c u64 start; start 127 arch/x86/kernel/mmconf-fam10h_64.c start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/ start 134 arch/x86/kernel/mmconf-fam10h_64.c range[hi_mmio_num].start = start; start 147 arch/x86/kernel/mmconf-fam10h_64.c if (range[0].start > base + MMCONF_SIZE) start 151 arch/x86/kernel/mmconf-fam10h_64.c base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT; start 160 arch/x86/kernel/mmconf-fam10h_64.c val = range[i].start & MMCONF_MASK; start 150 arch/x86/kernel/paravirt.c const char *start, const char *end) start 152 arch/x86/kernel/paravirt.c unsigned insn_len = end - start; start 155 arch/x86/kernel/paravirt.c BUG_ON(insn_len > len || start == NULL); start 157 arch/x86/kernel/paravirt.c memcpy(insn_buff, start, insn_len); start 194 arch/x86/kernel/paravirt.c .start = 0, start 640 arch/x86/kernel/pci-calgary_64.c static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start, start 648 arch/x86/kernel/pci-calgary_64.c numpages = ((limit - start) >> PAGE_SHIFT); start 649 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(pci_iommu(dev->bus), start, numpages); start 656 arch/x86/kernel/pci-calgary_64.c u64 start, limit; start 669 arch/x86/kernel/pci-calgary_64.c start = (high << 32) | low; start 672 arch/x86/kernel/pci-calgary_64.c calgary_reserve_mem_region(dev, start, limit); start 680 arch/x86/kernel/pci-calgary_64.c u64 start, limit; start 700 arch/x86/kernel/pci-calgary_64.c start = (high << 32) | low; start 703 arch/x86/kernel/pci-calgary_64.c calgary_reserve_mem_region(dev, start, limit); start 716 arch/x86/kernel/pci-calgary_64.c u64 start; start 722 arch/x86/kernel/pci-calgary_64.c start = (640 * 1024); start 725 arch/x86/kernel/pci-calgary_64.c start = 0; start 728 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(tbl, start, npages); start 1538 arch/x86/kernel/pci-calgary_64.c if (!r->start) start 1545 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(tbl, r->start, npages); start 11 arch/x86/kernel/pci-iommu_table.c find_dependents_of(struct iommu_table_entry *start, start 20 arch/x86/kernel/pci-iommu_table.c for (p = start; p < finish; p++) start 28 arch/x86/kernel/pci-iommu_table.c void __init sort_iommu_table(struct iommu_table_entry *start, start 33 arch/x86/kernel/pci-iommu_table.c for (p = start; p < finish; p++) { start 35 arch/x86/kernel/pci-iommu_table.c q = find_dependents_of(start, finish, p); start 50 arch/x86/kernel/pci-iommu_table.c void __init check_iommu_entries(struct iommu_table_entry *start, start 56 arch/x86/kernel/pci-iommu_table.c for (p = start; p < finish; p++) { start 57 arch/x86/kernel/pci-iommu_table.c q = find_dependents_of(start, finish, p); start 58 arch/x86/kernel/pci-iommu_table.c 
x = find_dependents_of(start, finish, q); start 67 arch/x86/kernel/pci-iommu_table.c for (p = start; p < finish; p++) { start 76 arch/x86/kernel/pci-iommu_table.c void __init check_iommu_entries(struct iommu_table_entry *start, start 27 arch/x86/kernel/probe_roms.c .start = 0xf0000, start 34 arch/x86/kernel/probe_roms.c .start = 0xe0000, start 41 arch/x86/kernel/probe_roms.c .start = 0xc8000, start 46 arch/x86/kernel/probe_roms.c .start = 0, start 51 arch/x86/kernel/probe_roms.c .start = 0, start 56 arch/x86/kernel/probe_roms.c .start = 0, start 61 arch/x86/kernel/probe_roms.c .start = 0, start 66 arch/x86/kernel/probe_roms.c .start = 0, start 73 arch/x86/kernel/probe_roms.c .start = 0xc0000, start 127 arch/x86/kernel/probe_roms.c rom = isa_bus_to_virt(res->start); start 161 arch/x86/kernel/probe_roms.c return ioremap(oprom->start, resource_size(oprom)); start 201 arch/x86/kernel/probe_roms.c unsigned long start, length, upper; start 206 arch/x86/kernel/probe_roms.c upper = adapter_rom_resources[0].start; start 207 arch/x86/kernel/probe_roms.c for (start = video_rom_resource.start; start < upper; start += 2048) { start 208 arch/x86/kernel/probe_roms.c rom = isa_bus_to_virt(start); start 212 arch/x86/kernel/probe_roms.c video_rom_resource.start = start; start 222 arch/x86/kernel/probe_roms.c video_rom_resource.end = start + length - 1; start 228 arch/x86/kernel/probe_roms.c start = (video_rom_resource.end + 1 + 2047) & ~2047UL; start 229 arch/x86/kernel/probe_roms.c if (start < upper) start 230 arch/x86/kernel/probe_roms.c start = upper; start 234 arch/x86/kernel/probe_roms.c upper = system_rom_resource.start; start 237 arch/x86/kernel/probe_roms.c rom = isa_bus_to_virt(extension_rom_resource.start); start 242 arch/x86/kernel/probe_roms.c upper = extension_rom_resource.start; start 247 arch/x86/kernel/probe_roms.c for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) { start 248 arch/x86/kernel/probe_roms.c rom = isa_bus_to_virt(start); start 259 arch/x86/kernel/probe_roms.c if (!length || start + length > upper || !romchecksum(rom, length)) start 262 arch/x86/kernel/probe_roms.c adapter_rom_resources[i].start = start; start 263 arch/x86/kernel/probe_roms.c adapter_rom_resources[i].end = start + length - 1; start 266 arch/x86/kernel/probe_roms.c start = adapter_rom_resources[i++].end & ~2047UL; start 804 arch/x86/kernel/process.c unsigned long start, bottom, top, sp, fp, ip, ret = 0; start 813 arch/x86/kernel/process.c start = (unsigned long)task_stack_page(p); start 814 arch/x86/kernel/process.c if (!start) start 833 arch/x86/kernel/process.c top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; start 835 arch/x86/kernel/process.c bottom = start; start 5 arch/x86/kernel/resource.c static void resource_clip(struct resource *res, resource_size_t start, start 10 arch/x86/kernel/resource.c if (res->end < start || res->start > end) start 13 arch/x86/kernel/resource.c if (res->start < start) start 14 arch/x86/kernel/resource.c low = start - res->start; start 21 arch/x86/kernel/resource.c res->end = start - 1; start 23 arch/x86/kernel/resource.c res->start = end + 1; start 162 arch/x86/kernel/rtc.c .start = RTC_PORT(0), start 167 arch/x86/kernel/rtc.c .start = RTC_IRQ, start 148 arch/x86/kernel/setup.c .start = 0, start 155 arch/x86/kernel/setup.c .start = 0, start 162 arch/x86/kernel/setup.c .start = 0, start 522 arch/x86/kernel/setup.c crashk_low_res.start = low_base; start 576 arch/x86/kernel/setup.c unsigned long long start; start 578 arch/x86/kernel/setup.c 
start = memblock_find_in_range(crash_base, start 581 arch/x86/kernel/setup.c if (start != crash_base) { start 602 arch/x86/kernel/setup.c crashk_res.start = crash_base; start 613 arch/x86/kernel/setup.c { .name = "dma1", .start = 0x00, .end = 0x1f, start 615 arch/x86/kernel/setup.c { .name = "pic1", .start = 0x20, .end = 0x21, start 617 arch/x86/kernel/setup.c { .name = "timer0", .start = 0x40, .end = 0x43, start 619 arch/x86/kernel/setup.c { .name = "timer1", .start = 0x50, .end = 0x53, start 621 arch/x86/kernel/setup.c { .name = "keyboard", .start = 0x60, .end = 0x60, start 623 arch/x86/kernel/setup.c { .name = "keyboard", .start = 0x64, .end = 0x64, start 625 arch/x86/kernel/setup.c { .name = "dma page reg", .start = 0x80, .end = 0x8f, start 627 arch/x86/kernel/setup.c { .name = "pic2", .start = 0xa0, .end = 0xa1, start 629 arch/x86/kernel/setup.c { .name = "dma2", .start = 0xc0, .end = 0xdf, start 631 arch/x86/kernel/setup.c { .name = "fpu", .start = 0xf0, .end = 0xff, start 758 arch/x86/kernel/setup.c u64 start = __pa_symbol(_text); start 759 arch/x86/kernel/setup.c u64 size = __pa_symbol(_end) - start; start 768 arch/x86/kernel/setup.c if (e820__mapped_all(start, start + size, E820_TYPE_RAM)) start 772 arch/x86/kernel/setup.c e820__range_remove(start, size, E820_TYPE_RAM, 0); start 773 arch/x86/kernel/setup.c e820__range_add(start, size, E820_TYPE_RAM); start 952 arch/x86/kernel/setup.c code_resource.start = __pa_symbol(_text); start 954 arch/x86/kernel/setup.c data_resource.start = __pa_symbol(_etext); start 956 arch/x86/kernel/setup.c bss_resource.start = __pa_symbol(__bss_start); start 1308 arch/x86/kernel/setup.c .start = 0xa0000, start 122 arch/x86/kernel/sysfb_efi.c resource_size_t start, end; start 135 arch/x86/kernel/sysfb_efi.c start = pci_resource_start(dev, i); start 137 arch/x86/kernel/sysfb_efi.c if (screen_info.lfb_base >= start && start 103 arch/x86/kernel/sysfb_simplefb.c res.start = base; start 104 arch/x86/kernel/sysfb_simplefb.c res.end = res.start + length - 1; start 105 arch/x86/kernel/sysfb_simplefb.c if (res.end <= res.start) start 172 arch/x86/kernel/tboot.c static void add_mac_region(phys_addr_t start, unsigned long size) start 175 arch/x86/kernel/tboot.c phys_addr_t end = start + size; start 180 arch/x86/kernel/tboot.c if (start && size) { start 182 arch/x86/kernel/tboot.c mr->start = round_down(start, PAGE_SIZE); start 183 arch/x86/kernel/tboot.c mr->size = round_up(end, PAGE_SIZE) - mr->start; start 228 arch/x86/kernel/tsc_sync.c cycles_t start, now, prev, end, cur_max_warp = 0; start 231 arch/x86/kernel/tsc_sync.c start = rdtsc_ordered(); start 235 arch/x86/kernel/tsc_sync.c end = start + (cycles_t) tsc_khz * timeout; start 236 arch/x86/kernel/tsc_sync.c now = start; start 281 arch/x86/kernel/tsc_sync.c WARN(!(now-start), start 283 arch/x86/kernel/tsc_sync.c now-start, end-start); start 150 arch/x86/kernel/unwind_orc.c unsigned int idx, start, stop; start 160 arch/x86/kernel/unwind_orc.c start = orc_lookup[idx]; start 163 arch/x86/kernel/unwind_orc.c if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || start 166 arch/x86/kernel/unwind_orc.c idx, lookup_num_blocks, start, stop, (void *)ip); start 170 arch/x86/kernel/unwind_orc.c return __orc_find(__start_orc_unwind_ip + start, start 171 arch/x86/kernel/unwind_orc.c __start_orc_unwind + start, stop - start, ip); start 1991 arch/x86/kvm/mmu.c unsigned long start, start 2013 arch/x86/kvm/mmu.c hva_start = max(start, memslot->userspace_addr); start 2048 arch/x86/kvm/mmu.c int 
kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) start 2050 arch/x86/kvm/mmu.c return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); start 2102 arch/x86/kvm/mmu.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) start 2104 arch/x86/kvm/mmu.c return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); start 3233 arch/x86/kvm/mmu.c u64 *start, u64 *end) start 3241 arch/x86/kvm/mmu.c gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); start 3246 arch/x86/kvm/mmu.c ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); start 3250 arch/x86/kvm/mmu.c for (i = 0; i < ret; i++, gfn++, start++) { start 3251 arch/x86/kvm/mmu.c mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, start 3262 arch/x86/kvm/mmu.c u64 *spte, *start = NULL; start 3272 arch/x86/kvm/mmu.c if (!start) start 3274 arch/x86/kvm/mmu.c if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) start 3276 arch/x86/kvm/mmu.c start = NULL; start 3277 arch/x86/kvm/mmu.c } else if (!start) start 3278 arch/x86/kvm/mmu.c start = spte; start 5957 arch/x86/kvm/mmu.c gfn_t start, end; start 5959 arch/x86/kvm/mmu.c start = max(gfn_start, memslot->base_gfn); start 5961 arch/x86/kvm/mmu.c if (start >= end) start 5966 arch/x86/kvm/mmu.c start, end - 1, true); start 136 arch/x86/kvm/mtrr.c u64 start; start 148 arch/x86/kvm/mtrr.c .start = 0x0, start 159 arch/x86/kvm/mtrr.c .start = 0x80000, start 170 arch/x86/kvm/mtrr.c .start = 0xc0000, start 212 arch/x86/kvm/mtrr.c static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end) start 217 arch/x86/kvm/mtrr.c *start = mtrr_seg->start + unit * unit_size; start 218 arch/x86/kvm/mtrr.c *end = *start + unit_size; start 226 arch/x86/kvm/mtrr.c WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg) start 238 arch/x86/kvm/mtrr.c n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift; start 242 arch/x86/kvm/mtrr.c static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end) start 249 arch/x86/kvm/mtrr.c fixed_mtrr_seg_unit_range(seg, unit, start, end); start 270 arch/x86/kvm/mtrr.c if (mtrr_seg->start <= addr && addr < mtrr_seg->end) start 284 arch/x86/kvm/mtrr.c index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift; start 293 arch/x86/kvm/mtrr.c return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift); start 296 arch/x86/kvm/mtrr.c static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end) start 300 arch/x86/kvm/mtrr.c *start = range->base & PAGE_MASK; start 307 arch/x86/kvm/mtrr.c *end = (*start | ~mask) + 1; start 313 arch/x86/kvm/mtrr.c gfn_t start, end; start 324 arch/x86/kvm/mtrr.c if (fixed_msr_to_range(msr, &start, &end)) { start 328 arch/x86/kvm/mtrr.c start = 0x0; start 333 arch/x86/kvm/mtrr.c var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end); start 336 arch/x86/kvm/mtrr.c kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end)); start 447 arch/x86/kvm/mtrr.c u64 start; start 483 arch/x86/kvm/mtrr.c seg = fixed_mtrr_addr_to_seg(iter->start); start 488 arch/x86/kvm/mtrr.c index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg); start 497 arch/x86/kvm/mtrr.c u64 start, end; start 499 arch/x86/kvm/mtrr.c var_mtrr_range(range, &start, &end); start 500 arch/x86/kvm/mtrr.c if (!(start >= iter->end || end <= iter->start)) { start 508 arch/x86/kvm/mtrr.c iter->partial_map |= iter->start_max < start; start 535 arch/x86/kvm/mtrr.c iter->start_max = iter->start; start 579 arch/x86/kvm/mtrr.c struct kvm_mtrr *mtrr_state, u64 start, u64 end) start 
582 arch/x86/kvm/mtrr.c iter->start = start; start 623 arch/x86/kvm/mtrr.c u64 start, end; start 628 arch/x86/kvm/mtrr.c start = gfn_to_gpa(gfn); start 629 arch/x86/kvm/mtrr.c end = start + PAGE_SIZE; start 631 arch/x86/kvm/mtrr.c mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { start 700 arch/x86/kvm/mtrr.c u64 start, end; start 703 arch/x86/kvm/mtrr.c start = gfn_to_gpa(gfn); start 705 arch/x86/kvm/mtrr.c mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { start 6422 arch/x86/kvm/svm.c struct sev_data_launch_start *start; start 6434 arch/x86/kvm/svm.c start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT); start 6435 arch/x86/kvm/svm.c if (!start) start 6446 arch/x86/kvm/svm.c start->dh_cert_address = __sme_set(__pa(dh_blob)); start 6447 arch/x86/kvm/svm.c start->dh_cert_len = params.dh_len; start 6458 arch/x86/kvm/svm.c start->session_address = __sme_set(__pa(session_blob)); start 6459 arch/x86/kvm/svm.c start->session_len = params.session_len; start 6462 arch/x86/kvm/svm.c start->handle = params.handle; start 6463 arch/x86/kvm/svm.c start->policy = params.policy; start 6466 arch/x86/kvm/svm.c ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error); start 6471 arch/x86/kvm/svm.c ret = sev_bind_asid(kvm, start->handle, error); start 6476 arch/x86/kvm/svm.c params.handle = start->handle; start 6478 arch/x86/kvm/svm.c sev_unbind_asid(kvm, start->handle); start 6483 arch/x86/kvm/svm.c sev->handle = start->handle; start 6491 arch/x86/kvm/svm.c kfree(start); start 4688 arch/x86/kvm/x86.c int start = 0; start 4697 arch/x86/kvm/x86.c start = 1; start 4703 arch/x86/kvm/x86.c start && i == 0); start 7982 arch/x86/kvm/x86.c unsigned long start, unsigned long end) start 7991 arch/x86/kvm/x86.c if (start <= apic_address && apic_address < end) start 95 arch/x86/lib/delay.c u64 start, end, delay, loops = __loops; start 104 arch/x86/lib/delay.c start = rdtsc_ordered(); start 124 arch/x86/lib/delay.c if (loops <= end - start) start 127 arch/x86/lib/delay.c loops -= end - start; start 129 arch/x86/lib/delay.c start = end; start 57 arch/x86/mm/amdtopology.c u64 start = PFN_PHYS(0); start 128 arch/x86/mm/amdtopology.c if (base < start) start 129 arch/x86/mm/amdtopology.c base = start; start 179 arch/x86/mm/cpu_entry_area.c unsigned long start, end; start 186 arch/x86/mm/cpu_entry_area.c start = CPU_ENTRY_AREA_BASE; start 187 arch/x86/mm/cpu_entry_area.c end = start + CPU_ENTRY_AREA_MAP_SIZE; start 190 arch/x86/mm/cpu_entry_area.c for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE) start 191 arch/x86/mm/cpu_entry_area.c populate_extra_pte(start); start 409 arch/x86/mm/dump_pagetables.c pmd_t *start, *pmd_start; start 412 arch/x86/mm/dump_pagetables.c pmd_start = start = (pmd_t *)pud_page_vaddr(addr); start 415 arch/x86/mm/dump_pagetables.c if (!pmd_none(*start)) { start 416 arch/x86/mm/dump_pagetables.c prot = pmd_flags(*start); start 418 arch/x86/mm/dump_pagetables.c if (pmd_large(*start) || !pmd_present(*start)) { start 421 arch/x86/mm/dump_pagetables.c walk_pte_level(m, st, *start, eff, start 426 arch/x86/mm/dump_pagetables.c start++; start 442 arch/x86/mm/dump_pagetables.c pud_t *start, *pud_start; start 445 arch/x86/mm/dump_pagetables.c pud_start = start = (pud_t *)p4d_page_vaddr(addr); start 449 arch/x86/mm/dump_pagetables.c if (!pud_none(*start)) { start 450 arch/x86/mm/dump_pagetables.c prot = pud_flags(*start); start 452 arch/x86/mm/dump_pagetables.c if (pud_large(*start) || !pud_present(*start)) { start 455 arch/x86/mm/dump_pagetables.c walk_pmd_level(m, 
st, *start, eff, start 461 arch/x86/mm/dump_pagetables.c start++; start 475 arch/x86/mm/dump_pagetables.c p4d_t *start, *p4d_start; start 481 arch/x86/mm/dump_pagetables.c p4d_start = start = (p4d_t *)pgd_page_vaddr(addr); start 485 arch/x86/mm/dump_pagetables.c if (!p4d_none(*start)) { start 486 arch/x86/mm/dump_pagetables.c prot = p4d_flags(*start); start 488 arch/x86/mm/dump_pagetables.c if (p4d_large(*start) || !p4d_present(*start)) { start 491 arch/x86/mm/dump_pagetables.c walk_pud_level(m, st, *start, eff, start 497 arch/x86/mm/dump_pagetables.c start++; start 521 arch/x86/mm/dump_pagetables.c pgd_t *start = INIT_PGD; start 527 arch/x86/mm/dump_pagetables.c start = pgd; start 537 arch/x86/mm/dump_pagetables.c if (!pgd_none(*start) && !is_hypervisor_range(i)) { start 538 arch/x86/mm/dump_pagetables.c prot = pgd_flags(*start); start 544 arch/x86/mm/dump_pagetables.c if (pgd_large(*start) || !pgd_present(*start)) { start 547 arch/x86/mm/dump_pagetables.c walk_p4d_level(m, &st, *start, eff, start 554 arch/x86/mm/dump_pagetables.c start++; start 28 arch/x86/mm/hugetlbpage.c unsigned long start = address; start 168 arch/x86/mm/init.c unsigned long start; start 268 arch/x86/mm/init.c mr[nr_range].start = start_pfn<<PAGE_SHIFT; start 289 arch/x86/mm/init.c unsigned long start = round_down(mr[i].start, PMD_SIZE); start 297 arch/x86/mm/init.c if (memblock_is_region_memory(start, end - start)) start 302 arch/x86/mm/init.c unsigned long start = round_down(mr[i].start, PUD_SIZE); start 305 arch/x86/mm/init.c if (memblock_is_region_memory(start, end - start)) start 337 arch/x86/mm/init.c unsigned long start, start 347 arch/x86/mm/init.c pfn = start_pfn = PFN_DOWN(start); start 417 arch/x86/mm/init.c if (mr[i].end != mr[i+1].start || start 421 arch/x86/mm/init.c old_start = mr[i].start; start 424 arch/x86/mm/init.c mr[i--].start = old_start; start 430 arch/x86/mm/init.c mr[i].start, mr[i].end - 1, start 457 arch/x86/mm/init.c if ((start_pfn >= pfn_mapped[i].start) && start 469 arch/x86/mm/init.c unsigned long __ref init_memory_mapping(unsigned long start, start 477 arch/x86/mm/init.c start, end - 1); start 480 arch/x86/mm/init.c nr_range = split_mem_range(mr, 0, start, end); start 483 arch/x86/mm/init.c ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, start 486 arch/x86/mm/init.c add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); start 513 arch/x86/mm/init.c u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); start 515 arch/x86/mm/init.c if (start >= end) start 522 arch/x86/mm/init.c can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= start 524 arch/x86/mm/init.c init_memory_mapping(start, end); start 525 arch/x86/mm/init.c mapped_ram_size += end - start; start 564 arch/x86/mm/init.c unsigned long real_end, start, last_start; start 577 arch/x86/mm/init.c last_start = start = real_end; start 587 arch/x86/mm/init.c start = round_down(last_start - 1, step_size); start 588 arch/x86/mm/init.c if (start < map_start) start 589 arch/x86/mm/init.c start = map_start; start 591 arch/x86/mm/init.c start = map_start; start 592 arch/x86/mm/init.c mapped_ram_size += init_range_memory_mapping(start, start 594 arch/x86/mm/init.c last_start = start; start 618 arch/x86/mm/init.c unsigned long next, start; start 623 arch/x86/mm/init.c start = map_start; start 624 arch/x86/mm/init.c min_pfn_mapped = start >> PAGE_SHIFT; start 632 arch/x86/mm/init.c while (start < map_end) { start 633 arch/x86/mm/init.c if (step_size && map_end - start > step_size) { start 634 arch/x86/mm/init.c 
next = round_up(start + 1, step_size); start 641 arch/x86/mm/init.c mapped_ram_size += init_range_memory_mapping(start, next); start 642 arch/x86/mm/init.c start = next; start 872 arch/x86/mm/init.c void __init free_initrd_mem(unsigned long start, unsigned long end) start 883 arch/x86/mm/init.c free_init_pages("initrd", start, PAGE_ALIGN(end)); start 127 arch/x86/mm/init_32.c page_table_range_init_count(unsigned long start, unsigned long end) start 139 arch/x86/mm/init_32.c vaddr = start; start 208 arch/x86/mm/init_32.c page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) start 215 arch/x86/mm/init_32.c unsigned long count = page_table_range_init_count(start, end); start 221 arch/x86/mm/init_32.c vaddr = start; start 253 arch/x86/mm/init_32.c kernel_physical_mapping_init(unsigned long start, start 269 arch/x86/mm/init_32.c start_pfn = start >> PAGE_SHIFT; start 437 arch/x86/mm/init_32.c phys_addr_t start, end; start 440 arch/x86/mm/init_32.c for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) { start 441 arch/x86/mm/init_32.c unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), start 854 arch/x86/mm/init_32.c int arch_add_memory(int nid, u64 start, u64 size, start 857 arch/x86/mm/init_32.c unsigned long start_pfn = start >> PAGE_SHIFT; start 863 arch/x86/mm/init_32.c void arch_remove_memory(int nid, u64 start, u64 size, start 866 arch/x86/mm/init_32.c unsigned long start_pfn = start >> PAGE_SHIFT; start 877 arch/x86/mm/init_32.c unsigned long start = PFN_ALIGN(_text); start 878 arch/x86/mm/init_32.c unsigned long size = PFN_ALIGN(_etext) - start; start 884 arch/x86/mm/init_32.c start, start+size); start 886 arch/x86/mm/init_32.c set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); start 891 arch/x86/mm/init_32.c unsigned long start = PFN_ALIGN(_text); start 892 arch/x86/mm/init_32.c unsigned long size = PFN_ALIGN(_etext) - start; start 898 arch/x86/mm/init_32.c start, start+size); start 900 arch/x86/mm/init_32.c set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); start 909 arch/x86/mm/init_32.c unsigned long start = PFN_ALIGN(_etext); start 913 arch/x86/mm/init_32.c unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; start 917 arch/x86/mm/init_32.c set_memory_nx(start, size >> PAGE_SHIFT); start 922 arch/x86/mm/init_32.c unsigned long start = PFN_ALIGN(_text); start 923 arch/x86/mm/init_32.c unsigned long size = (unsigned long)__end_rodata - start; start 925 arch/x86/mm/init_32.c set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); start 932 arch/x86/mm/init_32.c pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size); start 933 arch/x86/mm/init_32.c set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); start 936 arch/x86/mm/init_32.c set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); start 127 arch/x86/mm/init_64.c static void sync_global_pgds_l5(unsigned long start, unsigned long end) start 131 arch/x86/mm/init_64.c for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { start 136 arch/x86/mm/init_64.c if (addr < start) start 164 arch/x86/mm/init_64.c static void sync_global_pgds_l4(unsigned long start, unsigned long end) start 168 arch/x86/mm/init_64.c for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { start 212 arch/x86/mm/init_64.c void sync_global_pgds(unsigned long start, unsigned long end) start 215 arch/x86/mm/init_64.c sync_global_pgds_l5(start, end); start 217 arch/x86/mm/init_64.c sync_global_pgds_l4(start, end); start 835 arch/x86/mm/init_64.c 
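The init_32.c/init_64.c entries around here (mark_rodata_ro()-style code and the CPA self-test) all convert a [start, end) virtual range into a page count before calling the set_memory_*()/set_pages_*() helpers. A hedged sketch of that conversion, assuming a page-aligned range; the function name is illustrative:

/*
 * Hedged sketch: temporarily write-protect a page-aligned kernel
 * virtual range the way the mark_rodata_ro()-style entries above do,
 * by turning [start, end) into a page count for set_memory_ro()/rw().
 */
#include <linux/mm.h>		/* PAGE_SHIFT */
#include <asm/set_memory.h>

static void example_write_protect_range(unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) >> PAGE_SHIFT;

	set_memory_ro(start, nr_pages);	/* clear write permission on the range */
	/* ... window during which the range must not be written ... */
	set_memory_rw(start, nr_pages);	/* restore write access */
}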
static void update_end_of_memory_vars(u64 start, u64 size) start 837 arch/x86/mm/init_64.c unsigned long end_pfn = PFN_UP(start + size); start 861 arch/x86/mm/init_64.c int arch_add_memory(int nid, u64 start, u64 size, start 864 arch/x86/mm/init_64.c unsigned long start_pfn = start >> PAGE_SHIFT; start 867 arch/x86/mm/init_64.c init_memory_mapping(start, start + size); start 1173 arch/x86/mm/init_64.c remove_pagetable(unsigned long start, unsigned long end, bool direct, start 1181 arch/x86/mm/init_64.c for (addr = start; addr < end; addr = next) { start 1195 arch/x86/mm/init_64.c void __ref vmemmap_free(unsigned long start, unsigned long end, start 1198 arch/x86/mm/init_64.c remove_pagetable(start, end, false, altmap); start 1202 arch/x86/mm/init_64.c kernel_physical_mapping_remove(unsigned long start, unsigned long end) start 1204 arch/x86/mm/init_64.c start = (unsigned long)__va(start); start 1207 arch/x86/mm/init_64.c remove_pagetable(start, end, true, NULL); start 1210 arch/x86/mm/init_64.c void __ref arch_remove_memory(int nid, u64 start, u64 size, start 1213 arch/x86/mm/init_64.c unsigned long start_pfn = start >> PAGE_SHIFT; start 1217 arch/x86/mm/init_64.c kernel_physical_mapping_remove(start, start + size); start 1263 arch/x86/mm/init_64.c unsigned long start = PFN_ALIGN(_text); start 1270 arch/x86/mm/init_64.c start, end); start 1277 arch/x86/mm/init_64.c set_memory_rw(start, (end - start) >> PAGE_SHIFT); start 1282 arch/x86/mm/init_64.c unsigned long start = PFN_ALIGN(_text); start 1289 arch/x86/mm/init_64.c start, end); start 1294 arch/x86/mm/init_64.c set_memory_ro(start, (end - start) >> PAGE_SHIFT); start 1299 arch/x86/mm/init_64.c unsigned long start = PFN_ALIGN(_text); start 1307 arch/x86/mm/init_64.c (end - start) >> 10); start 1308 arch/x86/mm/init_64.c set_memory_ro(start, (end - start) >> PAGE_SHIFT); start 1328 arch/x86/mm/init_64.c printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); start 1329 arch/x86/mm/init_64.c set_memory_rw(start, (end-start) >> PAGE_SHIFT); start 1332 arch/x86/mm/init_64.c set_memory_ro(start, (end-start) >> PAGE_SHIFT); start 1449 arch/x86/mm/init_64.c static int __meminit vmemmap_populate_hugepages(unsigned long start, start 1459 arch/x86/mm/init_64.c for (addr = start; addr < end; addr = next) { start 1514 arch/x86/mm/init_64.c int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, start 1519 arch/x86/mm/init_64.c if (end - start < PAGES_PER_SECTION * sizeof(struct page)) start 1520 arch/x86/mm/init_64.c err = vmemmap_populate_basepages(start, end, node); start 1522 arch/x86/mm/init_64.c err = vmemmap_populate_hugepages(start, end, node, altmap); start 1528 arch/x86/mm/init_64.c err = vmemmap_populate_basepages(start, end, node); start 1530 arch/x86/mm/init_64.c sync_global_pgds(start, end - 1); start 77 arch/x86/mm/ioremap.c start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT; start 150 arch/x86/mm/ioremap.c u64 start, end; start 152 arch/x86/mm/ioremap.c start = (u64)addr; start 153 arch/x86/mm/ioremap.c end = start + size - 1; start 156 arch/x86/mm/ioremap.c walk_mem_res(start, end, desc, __ioremap_collect_map_flags); start 509 arch/x86/mm/ioremap.c unsigned long start = phys & PAGE_MASK; start 514 arch/x86/mm/ioremap.c vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB); start 162 arch/x86/mm/kasan_init_64.c unsigned long start; start 165 arch/x86/mm/kasan_init_64.c start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); start 168 arch/x86/mm/kasan_init_64.c kasan_populate_shadow(start, 
end, early_pfn_to_nid(range->start)); start 171 arch/x86/mm/kasan_init_64.c static void __init clear_pgds(unsigned long start, start 178 arch/x86/mm/kasan_init_64.c for (; start < pgd_end; start += PGDIR_SIZE) { start 179 arch/x86/mm/kasan_init_64.c pgd = pgd_offset_k(start); start 187 arch/x86/mm/kasan_init_64.c p4d_clear(p4d_offset(pgd, start)); start 190 arch/x86/mm/kasan_init_64.c pgd = pgd_offset_k(start); start 191 arch/x86/mm/kasan_init_64.c for (; start < end; start += P4D_SIZE) start 192 arch/x86/mm/kasan_init_64.c p4d_clear(p4d_offset(pgd, start)); start 13 arch/x86/mm/mm_internal.h unsigned long kernel_physical_mapping_init(unsigned long start, start 16 arch/x86/mm/mm_internal.h unsigned long kernel_physical_mapping_change(unsigned long start, start 629 arch/x86/mm/mpx.c unsigned long start; start 638 arch/x86/mm/mpx.c start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping); start 652 arch/x86/mm/mpx.c vma = find_vma(mm, start); start 653 arch/x86/mm/mpx.c if (!vma || vma->vm_start > start) start 662 arch/x86/mm/mpx.c addr = start; start 775 arch/x86/mm/mpx.c unsigned long start, unsigned long end) start 783 arch/x86/mm/mpx.c unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1); start 794 arch/x86/mm/mpx.c next = find_vma_prev(mm, start, &prev); start 814 arch/x86/mm/mpx.c next = find_vma_prev(mm, start, &prev); start 821 arch/x86/mm/mpx.c start = bta_start_vaddr; start 825 arch/x86/mm/mpx.c bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start); start 842 arch/x86/mm/mpx.c if ((start == bta_start_vaddr) && start 845 arch/x86/mm/mpx.c return zap_bt_entries_mapping(mm, bt_addr, start, end); start 849 arch/x86/mm/mpx.c unsigned long start, unsigned long end) start 852 arch/x86/mm/mpx.c trace_mpx_unmap_search(start, end); start 854 arch/x86/mm/mpx.c one_unmap_start = start; start 884 arch/x86/mm/mpx.c void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, start 906 arch/x86/mm/mpx.c vma = find_vma(mm, start); start 913 arch/x86/mm/mpx.c ret = mpx_unmap_tables(mm, start, end); start 130 arch/x86/mm/numa.c static int __init numa_add_memblk_to(int nid, u64 start, u64 end, start 134 arch/x86/mm/numa.c if (start == end) start 138 arch/x86/mm/numa.c if (start > end || nid < 0 || nid >= MAX_NUMNODES) { start 140 arch/x86/mm/numa.c nid, start, end - 1); start 149 arch/x86/mm/numa.c mi->blk[mi->nr_blks].start = start; start 182 arch/x86/mm/numa.c int __init numa_add_memblk(int nid, u64 start, u64 end) start 184 arch/x86/mm/numa.c return numa_add_memblk_to(nid, start, end, &numa_meminfo); start 241 arch/x86/mm/numa.c bi->start = max(bi->start, low); start 245 arch/x86/mm/numa.c if (bi->start >= bi->end || start 247 arch/x86/mm/numa.c bi->start, bi->end - bi->start)) start 257 arch/x86/mm/numa.c u64 start, end; start 264 arch/x86/mm/numa.c if (bi->end > bj->start && bi->start < bj->end) { start 267 arch/x86/mm/numa.c bi->nid, bi->start, bi->end - 1, start 268 arch/x86/mm/numa.c bj->nid, bj->start, bj->end - 1); start 272 arch/x86/mm/numa.c bi->nid, bi->start, bi->end - 1, start 273 arch/x86/mm/numa.c bj->start, bj->end - 1); start 283 arch/x86/mm/numa.c start = min(bi->start, bj->start); start 290 arch/x86/mm/numa.c if (start < bk->end && end > bk->start) start 296 arch/x86/mm/numa.c bi->nid, bi->start, bi->end - 1, bj->start, start 297 arch/x86/mm/numa.c bj->end - 1, start, end - 1); start 298 arch/x86/mm/numa.c bi->start = start; start 306 arch/x86/mm/numa.c mi->blk[i].start = mi->blk[i].end = 0; start 322 arch/x86/mm/numa.c if 
(mi->blk[i].start != mi->blk[i].end && start 443 arch/x86/mm/numa.c u64 s = mi->blk[i].start >> PAGE_SHIFT; start 491 arch/x86/mm/numa.c ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid); start 523 arch/x86/mm/numa.c memblock_clear_hotplug(mb->start, mb->end - mb->start); start 540 arch/x86/mm/numa.c memblock_set_node(mb->start, mb->end - mb->start, start 571 arch/x86/mm/numa.c u64 start = PFN_PHYS(max_pfn); start 577 arch/x86/mm/numa.c start = min(mi->blk[i].start, start); start 581 arch/x86/mm/numa.c if (start >= end) start 588 arch/x86/mm/numa.c if (end && (end - start) < NODE_MIN_SIZE) start 885 arch/x86/mm/numa.c int memory_add_physaddr_to_nid(u64 start) start 892 arch/x86/mm/numa.c if (mi->blk[i].start <= start && mi->blk[i].end > start) start 46 arch/x86/mm/numa_32.c void memory_present(int nid, unsigned long start, unsigned long end) start 51 arch/x86/mm/numa_32.c nid, start, end); start 54 arch/x86/mm/numa_32.c start = round_down(start, PAGES_PER_SECTION); start 56 arch/x86/mm/numa_32.c for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { start 31 arch/x86/mm/numa_emulation.c static u64 __init mem_hole_size(u64 start, u64 end) start 33 arch/x86/mm/numa_emulation.c unsigned long start_pfn = PFN_UP(start); start 58 arch/x86/mm/numa_emulation.c eb->start = pb->start; start 59 arch/x86/mm/numa_emulation.c eb->end = pb->start + size; start 65 arch/x86/mm/numa_emulation.c pb->start += size; start 66 arch/x86/mm/numa_emulation.c if (pb->start >= pb->end) { start 67 arch/x86/mm/numa_emulation.c WARN_ON_ONCE(pb->start > pb->end); start 72 arch/x86/mm/numa_emulation.c nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20); start 128 arch/x86/mm/numa_emulation.c u64 start, limit, end; start 136 arch/x86/mm/numa_emulation.c start = pi->blk[phys_blk].start; start 138 arch/x86/mm/numa_emulation.c end = start + size; start 147 arch/x86/mm/numa_emulation.c while (end - start - mem_hole_size(start, end) < size) { start 174 arch/x86/mm/numa_emulation.c min(end, limit) - start); start 186 arch/x86/mm/numa_emulation.c static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) start 188 arch/x86/mm/numa_emulation.c u64 end = start + size; start 190 arch/x86/mm/numa_emulation.c while (end - start - mem_hole_size(start, end) < size) { start 275 arch/x86/mm/numa_emulation.c u64 start, limit, end; start 284 arch/x86/mm/numa_emulation.c start = pi->blk[phys_blk].start; start 288 arch/x86/mm/numa_emulation.c end = start + size; start 290 arch/x86/mm/numa_emulation.c end = find_end_of_node(start, limit, size); start 311 arch/x86/mm/numa_emulation.c min(end, limit) - start); start 411 arch/x86/mm/numa_emulation.c pi.blk[0].start, pi.blk[0].end, 0, start 476 arch/x86/mm/numa_emulation.c if (ei.blk[i].start != ei.blk[i].end && start 9 arch/x86/mm/numa_internal.h u64 start; start 192 arch/x86/mm/pageattr.c within(unsigned long addr, unsigned long start, unsigned long end) start 194 arch/x86/mm/pageattr.c return addr >= start && addr < end; start 198 arch/x86/mm/pageattr.c within_inclusive(unsigned long addr, unsigned long start, unsigned long end) start 200 arch/x86/mm/pageattr.c return addr >= start && addr <= end; start 436 arch/x86/mm/pageattr.c static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end) start 441 arch/x86/mm/pageattr.c if (overlaps(start, end, t_start, t_end)) start 456 arch/x86/mm/pageattr.c static pgprotval_t protect_kernel_text_ro(unsigned long start, start 463 arch/x86/mm/pageattr.c if (!kernel_set_to_readonly || 
!overlaps(start, end, t_start, t_end)) start 477 arch/x86/mm/pageattr.c if (lookup_address(start, &level) && (level != PG_LEVEL_4K)) start 482 arch/x86/mm/pageattr.c static pgprotval_t protect_kernel_text_ro(unsigned long start, start 495 arch/x86/mm/pageattr.c unsigned long start, unsigned long end, start 508 arch/x86/mm/pageattr.c lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot), start 518 arch/x86/mm/pageattr.c static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, start 533 arch/x86/mm/pageattr.c end = start + npg * PAGE_SIZE - 1; start 535 arch/x86/mm/pageattr.c res = protect_kernel_text(start, end); start 536 arch/x86/mm/pageattr.c check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); start 545 arch/x86/mm/pageattr.c if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) { start 546 arch/x86/mm/pageattr.c res = protect_kernel_text_ro(start, end); start 547 arch/x86/mm/pageattr.c check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); start 553 arch/x86/mm/pageattr.c check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX"); start 557 arch/x86/mm/pageattr.c check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO"); start 1088 arch/x86/mm/pageattr.c static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) start 1090 arch/x86/mm/pageattr.c pte_t *pte = pte_offset_kernel(pmd, start); start 1092 arch/x86/mm/pageattr.c while (start < end) { start 1095 arch/x86/mm/pageattr.c start += PAGE_SIZE; start 1107 arch/x86/mm/pageattr.c unsigned long start, unsigned long end) start 1109 arch/x86/mm/pageattr.c if (unmap_pte_range(pmd, start, end)) start 1114 arch/x86/mm/pageattr.c static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) start 1116 arch/x86/mm/pageattr.c pmd_t *pmd = pmd_offset(pud, start); start 1121 arch/x86/mm/pageattr.c if (start & (PMD_SIZE - 1)) { start 1122 arch/x86/mm/pageattr.c unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; start 1125 arch/x86/mm/pageattr.c __unmap_pmd_range(pud, pmd, start, pre_end); start 1127 arch/x86/mm/pageattr.c start = pre_end; start 1134 arch/x86/mm/pageattr.c while (end - start >= PMD_SIZE) { start 1138 arch/x86/mm/pageattr.c __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); start 1140 arch/x86/mm/pageattr.c start += PMD_SIZE; start 1147 arch/x86/mm/pageattr.c if (start < end) start 1148 arch/x86/mm/pageattr.c return __unmap_pmd_range(pud, pmd, start, end); start 1158 arch/x86/mm/pageattr.c static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end) start 1160 arch/x86/mm/pageattr.c pud_t *pud = pud_offset(p4d, start); start 1165 arch/x86/mm/pageattr.c if (start & (PUD_SIZE - 1)) { start 1166 arch/x86/mm/pageattr.c unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; start 1169 arch/x86/mm/pageattr.c unmap_pmd_range(pud, start, pre_end); start 1171 arch/x86/mm/pageattr.c start = pre_end; start 1178 arch/x86/mm/pageattr.c while (end - start >= PUD_SIZE) { start 1183 arch/x86/mm/pageattr.c unmap_pmd_range(pud, start, start + PUD_SIZE); start 1185 arch/x86/mm/pageattr.c start += PUD_SIZE; start 1192 arch/x86/mm/pageattr.c if (start < end) start 1193 arch/x86/mm/pageattr.c unmap_pmd_range(pud, start, end); start 1222 arch/x86/mm/pageattr.c unsigned long start, unsigned long end, start 1227 arch/x86/mm/pageattr.c pte = pte_offset_kernel(pmd, start); start 1231 arch/x86/mm/pageattr.c while (num_pages-- && start < end) { start 1234 arch/x86/mm/pageattr.c start += PAGE_SIZE; start 1241 
arch/x86/mm/pageattr.c unsigned long start, unsigned long end, start 1251 arch/x86/mm/pageattr.c if (start & (PMD_SIZE - 1)) { start 1252 arch/x86/mm/pageattr.c unsigned long pre_end = start + (num_pages << PAGE_SHIFT); start 1253 arch/x86/mm/pageattr.c unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; start 1256 arch/x86/mm/pageattr.c cur_pages = (pre_end - start) >> PAGE_SHIFT; start 1262 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, start); start 1267 arch/x86/mm/pageattr.c populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); start 1269 arch/x86/mm/pageattr.c start = pre_end; start 1280 arch/x86/mm/pageattr.c while (end - start >= PMD_SIZE) { start 1289 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, start); start 1294 arch/x86/mm/pageattr.c start += PMD_SIZE; start 1302 arch/x86/mm/pageattr.c if (start < end) { start 1303 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, start); start 1308 arch/x86/mm/pageattr.c populate_pte(cpa, start, end, num_pages - cur_pages, start 1314 arch/x86/mm/pageattr.c static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d, start 1322 arch/x86/mm/pageattr.c end = start + (cpa->numpages << PAGE_SHIFT); start 1328 arch/x86/mm/pageattr.c if (start & (PUD_SIZE - 1)) { start 1330 arch/x86/mm/pageattr.c unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; start 1333 arch/x86/mm/pageattr.c cur_pages = (pre_end - start) >> PAGE_SHIFT; start 1336 arch/x86/mm/pageattr.c pud = pud_offset(p4d, start); start 1345 arch/x86/mm/pageattr.c cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, start 1350 arch/x86/mm/pageattr.c start = pre_end; start 1357 arch/x86/mm/pageattr.c pud = pud_offset(p4d, start); start 1363 arch/x86/mm/pageattr.c while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { start 1367 arch/x86/mm/pageattr.c start += PUD_SIZE; start 1374 arch/x86/mm/pageattr.c if (start < end) { start 1377 arch/x86/mm/pageattr.c pud = pud_offset(p4d, start); start 1382 arch/x86/mm/pageattr.c tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, start 2009 arch/x86/mm/pageattr.c unsigned long start; start 2019 arch/x86/mm/pageattr.c start = page_to_pfn(pages[i]) << PAGE_SHIFT; start 2020 arch/x86/mm/pageattr.c end = start + PAGE_SIZE; start 2021 arch/x86/mm/pageattr.c if (reserve_memtype(start, end, new_type, NULL)) start 2045 arch/x86/mm/pageattr.c start = page_to_pfn(pages[i]) << PAGE_SHIFT; start 2046 arch/x86/mm/pageattr.c end = start + PAGE_SIZE; start 2047 arch/x86/mm/pageattr.c free_memtype(start, end); start 2081 arch/x86/mm/pageattr.c unsigned long start; start 2094 arch/x86/mm/pageattr.c start = page_to_pfn(pages[i]) << PAGE_SHIFT; start 2095 arch/x86/mm/pageattr.c end = start + PAGE_SIZE; start 2096 arch/x86/mm/pageattr.c free_memtype(start, end); start 386 arch/x86/mm/pat.c static unsigned long pat_x_mtrr_type(u64 start, u64 end, start 396 arch/x86/mm/pat.c mtrr_type = mtrr_type_lookup(start, end, &uniform); start 424 arch/x86/mm/pat.c static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) start 427 arch/x86/mm/pat.c unsigned long start_pfn = start >> PAGE_SHIFT; start 460 arch/x86/mm/pat.c static int reserve_ram_pages_type(u64 start, u64 end, start 479 arch/x86/mm/pat.c for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { start 486 arch/x86/mm/pat.c start, end - 1, type, req_type); start 497 arch/x86/mm/pat.c for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { start 504 arch/x86/mm/pat.c static int free_ram_pages_type(u64 start, u64 end) start 509 
arch/x86/mm/pat.c for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { start 545 arch/x86/mm/pat.c int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, start 553 arch/x86/mm/pat.c start = sanitize_phys(start); start 555 arch/x86/mm/pat.c if (start >= end) { start 557 arch/x86/mm/pat.c start, end - 1, cattr_name(req_type)); start 569 arch/x86/mm/pat.c if (x86_platform.is_untracked_pat_range(start, end)) { start 581 arch/x86/mm/pat.c actual_type = pat_x_mtrr_type(start, end, req_type); start 586 arch/x86/mm/pat.c is_range_ram = pat_pagerange_is_ram(start, end); start 589 arch/x86/mm/pat.c err = reserve_ram_pages_type(start, end, req_type, new_type); start 600 arch/x86/mm/pat.c new->start = start; start 609 arch/x86/mm/pat.c start, end - 1, start 620 arch/x86/mm/pat.c start, end - 1, cattr_name(new->type), cattr_name(req_type), start 626 arch/x86/mm/pat.c int free_memtype(u64 start, u64 end) start 635 arch/x86/mm/pat.c start = sanitize_phys(start); start 639 arch/x86/mm/pat.c if (x86_platform.is_untracked_pat_range(start, end)) start 642 arch/x86/mm/pat.c is_range_ram = pat_pagerange_is_ram(start, end); start 645 arch/x86/mm/pat.c err = free_ram_pages_type(start, end); start 653 arch/x86/mm/pat.c entry = rbt_memtype_erase(start, end); start 658 arch/x86/mm/pat.c current->comm, current->pid, start, end - 1); start 664 arch/x86/mm/pat.c dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1); start 735 arch/x86/mm/pat.c int io_reserve_memtype(resource_size_t start, resource_size_t end, start 738 arch/x86/mm/pat.c resource_size_t size = end - start; start 743 arch/x86/mm/pat.c WARN_ON_ONCE(iomem_map_sanity_check(start, size)); start 745 arch/x86/mm/pat.c ret = reserve_memtype(start, end, req_type, &new_type); start 749 arch/x86/mm/pat.c if (!is_new_memtype_allowed(start, size, req_type, new_type)) start 752 arch/x86/mm/pat.c if (kernel_map_sync_memtype(start, size, new_type) < 0) start 759 arch/x86/mm/pat.c free_memtype(start, end); start 770 arch/x86/mm/pat.c void io_free_memtype(resource_size_t start, resource_size_t end) start 772 arch/x86/mm/pat.c free_memtype(start, end); start 775 arch/x86/mm/pat.c int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) start 779 arch/x86/mm/pat.c return io_reserve_memtype(start, start + size, &type); start 783 arch/x86/mm/pat.c void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) start 785 arch/x86/mm/pat.c io_free_memtype(start, start + size); start 1148 arch/x86/mm/pat.c print_entry->start, print_entry->end); start 1155 arch/x86/mm/pat.c .start = memtype_seq_start, start 11 arch/x86/mm/pat_internal.h u64 start; start 34 arch/x86/mm/pat_internal.h extern struct memtype *rbt_memtype_erase(u64 start, u64 end); start 41 arch/x86/mm/pat_internal.h static inline struct memtype *rbt_memtype_erase(u64 start, u64 end) start 39 arch/x86/mm/pat_rbtree.c static int is_node_overlap(struct memtype *node, u64 start, u64 end) start 41 arch/x86/mm/pat_rbtree.c if (node->start >= end || node->end <= start) start 64 arch/x86/mm/pat_rbtree.c u64 start, u64 end) start 72 arch/x86/mm/pat_rbtree.c if (get_subtree_max_end(node->rb_left) > start) { start 75 arch/x86/mm/pat_rbtree.c } else if (is_node_overlap(data, start, end)) { start 78 arch/x86/mm/pat_rbtree.c } else if (start >= data->start) { start 94 arch/x86/mm/pat_rbtree.c u64 start, u64 end, int match_type) start 98 arch/x86/mm/pat_rbtree.c match = memtype_rb_lowest_match(root, start, end); start 99 arch/x86/mm/pat_rbtree.c while 
(match != NULL && match->start < end) { start 103 arch/x86/mm/pat_rbtree.c (match->start == start) && (match->end == end)) start 107 arch/x86/mm/pat_rbtree.c (match->start < start) && (match->end == end)) start 121 arch/x86/mm/pat_rbtree.c u64 start, u64 end, start 129 arch/x86/mm/pat_rbtree.c match = memtype_rb_lowest_match(&memtype_rbroot, start, end); start 136 arch/x86/mm/pat_rbtree.c dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end); start 143 arch/x86/mm/pat_rbtree.c if (match->start >= end) /* Checked all possible matches */ start 146 arch/x86/mm/pat_rbtree.c if (is_node_overlap(match, start, end) && start 161 arch/x86/mm/pat_rbtree.c current->comm, current->pid, start, end, start 177 arch/x86/mm/pat_rbtree.c if (newdata->start <= data->start) start 179 arch/x86/mm/pat_rbtree.c else if (newdata->start > data->start) start 193 arch/x86/mm/pat_rbtree.c err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end, start 206 arch/x86/mm/pat_rbtree.c struct memtype *rbt_memtype_erase(u64 start, u64 end) start 217 arch/x86/mm/pat_rbtree.c data = memtype_rb_match(&memtype_rbroot, start, end, start 220 arch/x86/mm/pat_rbtree.c data = memtype_rb_match(&memtype_rbroot, start, end, start 226 arch/x86/mm/pat_rbtree.c if (data->start == start) { start 234 arch/x86/mm/pat_rbtree.c data->end = start; start 304 arch/x86/mm/pti.c pti_clone_pgtable(unsigned long start, unsigned long end, start 313 arch/x86/mm/pti.c for (addr = start; addr < end;) { start 321 arch/x86/mm/pti.c if (addr < start) start 481 arch/x86/mm/pti.c unsigned long start, end; start 483 arch/x86/mm/pti.c start = CPU_ENTRY_AREA_BASE; start 484 arch/x86/mm/pti.c end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES); start 486 arch/x86/mm/pti.c pti_clone_pgtable(start, end, PTI_CLONE_PMD); start 575 arch/x86/mm/pti.c unsigned long start = PFN_ALIGN(_text); start 589 arch/x86/mm/pti.c pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE); start 598 arch/x86/mm/pti.c set_memory_global(start, (end_global - start) >> PAGE_SHIFT); start 609 arch/x86/mm/pti.c unsigned long start = PFN_ALIGN(_text); start 617 arch/x86/mm/pti.c set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT); start 616 arch/x86/mm/tlb.c unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift; start 617 arch/x86/mm/tlb.c unsigned long addr = f->start; start 671 arch/x86/mm/tlb.c (info->end - info->start) >> PAGE_SHIFT); start 733 arch/x86/mm/tlb.c unsigned long start, unsigned long end, start 748 arch/x86/mm/tlb.c info->start = start; start 767 arch/x86/mm/tlb.c void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, start 779 arch/x86/mm/tlb.c ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) { start 780 arch/x86/mm/tlb.c start = 0; start 787 arch/x86/mm/tlb.c info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables, start 823 arch/x86/mm/tlb.c for (addr = f->start; addr < f->end; addr += PAGE_SIZE) start 827 arch/x86/mm/tlb.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 831 arch/x86/mm/tlb.c (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) { start 837 arch/x86/mm/tlb.c info = get_flush_tlb_info(NULL, start, end, 0, false, 0); start 854 arch/x86/mm/tlb.c .start = 0, start 98 arch/x86/oprofile/nmi_int.c model->start(msrs); start 757 arch/x86/oprofile/nmi_int.c ops->start = nmi_start; start 536 arch/x86/oprofile/op_model_amd.c .start = &op_amd_start, start 708 arch/x86/oprofile/op_model_p4.c .start = &p4_start, start 720 arch/x86/oprofile/op_model_p4.c 
.start = &p4_start, start 194 arch/x86/oprofile/op_model_ppro.c .start = &ppro_start, start 242 arch/x86/oprofile/op_model_ppro.c .start = &ppro_start, start 48 arch/x86/oprofile/op_x86_model.h void (*start)(struct op_msrs const * const msrs); start 197 arch/x86/pci/acpi.c info->start_bus = (u8)root->secondary.start; start 247 arch/x86/pci/acpi.c int busnum = root->secondary.start; start 291 arch/x86/pci/acpi.c res->start == 0xCF8 && res->end == 0xCFF; start 297 arch/x86/pci/acpi.c int busnum = ci->root->secondary.start; start 329 arch/x86/pci/acpi.c int busnum = root->secondary.start; start 71 arch/x86/pci/amd_bus.c u64 start; start 157 arch/x86/pci/amd_bus.c start = reg & 0xfff000; start 168 arch/x86/pci/amd_bus.c node, link, start, end); start 173 arch/x86/pci/amd_bus.c update_res(info, start, end, IORESOURCE_IO, 1); start 174 arch/x86/pci/amd_bus.c subtract_range(range, RANGE_NUM, start, end + 1); start 184 arch/x86/pci/amd_bus.c update_res(info, range[i].start, range[i].end - 1, start 208 arch/x86/pci/amd_bus.c fam10h_mmconf_start = fam10h_mmconf->start; start 223 arch/x86/pci/amd_bus.c start = reg & 0xffffff00; /* 39:16 on 31:8*/ start 224 arch/x86/pci/amd_bus.c start <<= 8; start 238 arch/x86/pci/amd_bus.c node, link, start, end); start 246 arch/x86/pci/amd_bus.c if (start >= fam10h_mmconf_start && start 247 arch/x86/pci/amd_bus.c start <= fam10h_mmconf_end) { start 248 arch/x86/pci/amd_bus.c start = fam10h_mmconf_end + 1; start 258 arch/x86/pci/amd_bus.c if (start < fam10h_mmconf_start && start 262 arch/x86/pci/amd_bus.c update_res(info, start, endx, IORESOURCE_MEM, 0); start 263 arch/x86/pci/amd_bus.c subtract_range(range, RANGE_NUM, start, start 265 arch/x86/pci/amd_bus.c printk(KERN_CONT " ==> [%llx, %llx]", start, endx); start 266 arch/x86/pci/amd_bus.c start = fam10h_mmconf_end + 1; start 270 arch/x86/pci/amd_bus.c if (start <= end) { start 271 arch/x86/pci/amd_bus.c printk(KERN_CONT " %s [%llx, %llx]", endx ? 
"and" : "==>", start, end); start 279 arch/x86/pci/amd_bus.c update_res(info, cap_resource(start), cap_resource(end), start 281 arch/x86/pci/amd_bus.c subtract_range(range, RANGE_NUM, start, end + 1); start 309 arch/x86/pci/amd_bus.c update_res(info, cap_resource(range[i].start), start 319 arch/x86/pci/amd_bus.c busnum = info->busn.start; start 49 arch/x86/pci/broadcom_bus.c res.start = ((resource_size_t) word1 << 16) | 0x0000; start 52 arch/x86/pci/broadcom_bus.c update_res(info, res.start, res.end, res.flags, 0); start 59 arch/x86/pci/broadcom_bus.c res.start = ((resource_size_t) word1 << 16) | 0x0000; start 62 arch/x86/pci/broadcom_bus.c update_res(info, res.start, res.end, res.flags, 0); start 69 arch/x86/pci/broadcom_bus.c res.start = word1; start 72 arch/x86/pci/broadcom_bus.c update_res(info, res.start, res.end, res.flags, 0); start 76 arch/x86/pci/broadcom_bus.c res.start = fbus; start 15 arch/x86/pci/bus_numa.c if (info->busn.start == bus) start 84 arch/x86/pci/bus_numa.c info->busn.start = bus_min; start 95 arch/x86/pci/bus_numa.c void update_res(struct pci_root_info *info, resource_size_t start, start 101 arch/x86/pci/bus_numa.c if (start > end) start 104 arch/x86/pci/bus_numa.c if (start == MAX_RESOURCE) start 119 arch/x86/pci/bus_numa.c common_start = max(res->start, start); start 124 arch/x86/pci/bus_numa.c final_start = min(res->start, start); start 127 arch/x86/pci/bus_numa.c res->start = final_start; start 142 arch/x86/pci/bus_numa.c res->start = start; start 25 arch/x86/pci/bus_numa.h extern void update_res(struct pci_root_info *info, resource_size_t start, start 140 arch/x86/pci/common.c if (bar_r->start == 0 && bar_r->end != 0) { start 150 arch/x86/pci/common.c if (rom_r->start) { start 154 arch/x86/pci/common.c rom_r->start = rom_r->end = rom_r->flags = 0; start 347 arch/x86/pci/fixup.c res->start = 0xC0000; start 348 arch/x86/pci/fixup.c res->end = res->start + 0x20000 - 1; start 531 arch/x86/pci/fixup.c if (r->flags & IORESOURCE_MEM && r->start == hpet_address) { start 661 arch/x86/pci/fixup.c if (r->end == r->start + 0x7ff) { start 662 arch/x86/pci/fixup.c r->start = 0; start 742 arch/x86/pci/fixup.c res->start = 0xbd00000000ull; start 760 arch/x86/pci/fixup.c base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) | start 763 arch/x86/pci/fixup.c high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) | start 159 arch/x86/pci/i386.c resource_size_t start = res->start; start 163 arch/x86/pci/i386.c return start; start 164 arch/x86/pci/i386.c if (start & 0x300) start 165 arch/x86/pci/i386.c start = (start + 0x3ff) & ~0x3ff; start 168 arch/x86/pci/i386.c if (start < BIOS_END) start 169 arch/x86/pci/i386.c start = BIOS_END; start 171 arch/x86/pci/i386.c return start; start 220 arch/x86/pci/i386.c if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) { start 227 arch/x86/pci/i386.c r->start = r->end = 0; start 245 arch/x86/pci/i386.c int start; start 264 arch/x86/pci/i386.c for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) { start 268 arch/x86/pci/i386.c if (!r->start) /* Address not assigned at all */ start 285 arch/x86/pci/i386.c idx, r->start); start 286 arch/x86/pci/i386.c r->end -= r->start; start 287 arch/x86/pci/i386.c r->start = 0; start 331 arch/x86/pci/i386.c if (!r->flags || !r->start) start 337 arch/x86/pci/i386.c r->end -= r->start; start 338 arch/x86/pci/i386.c r->start = 0; start 387 arch/x86/pci/intel_mid_pci.c dev->resource[i].end = dev->resource[i].start + size - 1; start 69 arch/x86/pci/mmconfig-shared.c static struct 
pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start, start 84 arch/x86/pci/mmconfig-shared.c new->start_bus = start; start 88 arch/x86/pci/mmconfig-shared.c res->start = addr + PCI_MMCFG_BUS_OFFSET(start); start 92 arch/x86/pci/mmconfig-shared.c "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); start 98 arch/x86/pci/mmconfig-shared.c struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, start 103 arch/x86/pci/mmconfig-shared.c new = pci_mmconfig_alloc(segment, start, end, addr); start 112 arch/x86/pci/mmconfig-shared.c segment, start, end, &new->res, (unsigned long)addr); start 271 arch/x86/pci/mmconfig-shared.c int start, size_index, end; start 290 arch/x86/pci/mmconfig-shared.c start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; start 291 arch/x86/pci/mmconfig-shared.c end = start + extcfg_sizebus[size_index] - 1; start 292 arch/x86/pci/mmconfig-shared.c if (pci_mmconfig_add(0, start, end, base) == NULL) start 389 arch/x86/pci/mmconfig-shared.c if ((mcfg_res->start >= fixmem32->address) && start 406 arch/x86/pci/mmconfig-shared.c if ((mcfg_res->start >= address.address.minimum) && start 428 arch/x86/pci/mmconfig-shared.c static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used) start 432 arch/x86/pci/mmconfig-shared.c mcfg_res.start = start; start 445 arch/x86/pci/mmconfig-shared.c typedef bool (*check_reserved_t)(u64 start, u64 end, unsigned type); start 451 arch/x86/pci/mmconfig-shared.c u64 addr = cfg->res.start; start 477 arch/x86/pci/mmconfig-shared.c cfg->res.end = cfg->res.start + start 611 arch/x86/pci/mmconfig-shared.c extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size, start 614 arch/x86/pci/mmconfig-shared.c static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size, start 624 arch/x86/pci/mmconfig-shared.c rc = func(cfg->res.start, resource_size(&cfg->res), data); start 721 arch/x86/pci/mmconfig-shared.c int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, start 731 arch/x86/pci/mmconfig-shared.c if (start > end) start 735 arch/x86/pci/mmconfig-shared.c cfg = pci_mmconfig_lookup(seg, start); start 753 arch/x86/pci/mmconfig-shared.c cfg = pci_mmconfig_alloc(seg, start, end, addr); start 795 arch/x86/pci/mmconfig-shared.c int pci_mmconfig_delete(u16 seg, u8 start, u8 end) start 801 arch/x86/pci/mmconfig-shared.c if (cfg->segment == seg && cfg->start_bus == start && start 102 arch/x86/pci/mmconfig_64.c u64 start, size; start 105 arch/x86/pci/mmconfig_64.c start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus); start 108 arch/x86/pci/mmconfig_64.c addr = ioremap_nocache(start, size); start 134 arch/x86/platform/efi/efi.c unsigned long long start = md->phys_addr; start 139 arch/x86/platform/efi/efi.c memblock_mark_mirror(start, size); start 159 arch/x86/platform/efi/efi.c unsigned long long start = md->phys_addr; start 195 arch/x86/platform/efi/efi.c e820__range_add(start, size, e820_type); start 267 arch/x86/platform/efi/quirks.c mr.range.start = addr; start 306 arch/x86/platform/efi/quirks.c static __init bool can_free_region(u64 start, u64 size) start 308 arch/x86/platform/efi/quirks.c if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) start 311 arch/x86/platform/efi/quirks.c if (!e820__mapped_all(start, start+size, E820_TYPE_RAM)) start 322 arch/x86/platform/efi/quirks.c u64 start = md->phys_addr; start 330 arch/x86/platform/efi/quirks.c already_reserved = memblock_is_region_reserved(start, size); start 347 arch/x86/platform/efi/quirks.c 
memblock_reserve(start, size); start 354 arch/x86/platform/efi/quirks.c if (can_free_region(start, size)) start 413 arch/x86/platform/efi/quirks.c unsigned long long start = md->phys_addr; start 450 arch/x86/platform/efi/quirks.c if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { start 451 arch/x86/platform/efi/quirks.c set_real_mode_mem(start); start 452 arch/x86/platform/efi/quirks.c start += rm_size; start 456 arch/x86/platform/efi/quirks.c memblock_free_late(start, size); start 25 arch/x86/platform/goldfish/goldfish.c .start = GOLDFISH_PDEV_BUS_BASE, start 30 arch/x86/platform/goldfish/goldfish.c .start = GOLDFISH_PDEV_BUS_IRQ, start 20 arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c .start = FLIS_BASE_ADDR, start 66 arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c res->start = res->end = pentry->irq; start 23 arch/x86/platform/intel-mid/device_libs/platform_msic.c .start = INTEL_MSIC_IRQ_PHYS_BASE, start 164 arch/x86/platform/intel-mid/intel_mid_vrtc.c vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr; start 168 arch/x86/platform/intel-mid/intel_mid_vrtc.c vrtc_resources[1].start = sfi_mrtc_array[0].irq; start 320 arch/x86/platform/intel-mid/sfi.c res.start = irq; start 539 arch/x86/platform/intel-quark/imr.c unsigned long start, end; start 547 arch/x86/platform/intel-quark/imr.c start = (unsigned long)_text; start 561 arch/x86/platform/intel-quark/imr.c size / 1024, start, end); start 564 arch/x86/platform/intel-quark/imr.c size / 1024, start, end); start 314 arch/x86/platform/intel/iosf_mbi.c unsigned long start, end; start 352 arch/x86/platform/intel/iosf_mbi.c start = jiffies; start 353 arch/x86/platform/intel/iosf_mbi.c end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); start 359 arch/x86/platform/intel/iosf_mbi.c jiffies_to_msecs(jiffies - start)); start 83 arch/x86/platform/iris/iris.c .start = IRIS_GIO_BASE, start 139 arch/x86/platform/olpc/olpc-xo1-pm.c pms_base = res->start; start 141 arch/x86/platform/olpc/olpc-xo1-pm.c acpi_base = res->start; start 29 arch/x86/platform/olpc/olpc-xo1-rtc.c .start = RTC_PORT(0), start 34 arch/x86/platform/olpc/olpc-xo1-rtc.c .start = RTC_IRQ, start 549 arch/x86/platform/olpc/olpc-xo1-sci.c acpi_base = res->start; start 96 arch/x86/platform/olpc/olpc_ofw.c unsigned long start; start 112 arch/x86/platform/olpc/olpc_ofw.c start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND); start 114 arch/x86/platform/olpc/olpc_ofw.c (unsigned long)olpc_ofw_cif, (-start) >> 20); start 115 arch/x86/platform/olpc/olpc_ofw.c reserve_top_address(-start); start 1159 arch/x86/platform/uv/tlb_uv.c if (!info->end || (info->end - info->start) <= PAGE_SIZE) start 1160 arch/x86/platform/uv/tlb_uv.c address = info->start; start 1655 arch/x86/platform/uv/tlb_uv.c .start = ptc_seq_start, start 115 arch/x86/power/hibernate_64.c mstart = pfn_mapped[i].start << PAGE_SHIFT; start 49 arch/x86/purgatory/purgatory.c sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); start 89 arch/x86/realmode/init.c trampoline_header->start = __pa_symbol(startup_32_smp); start 100 arch/x86/realmode/init.c trampoline_header->start = (u64) secondary_startup_64; start 704 arch/x86/xen/enlighten_pv.c unsigned long start, end; start 710 arch/x86/xen/enlighten_pv.c start = __this_cpu_read(idt_desc.address); start 711 arch/x86/xen/enlighten_pv.c end = start + __this_cpu_read(idt_desc.size) + 1; start 717 arch/x86/xen/enlighten_pv.c if (p >= start && (p + 8) <= end) { start 1359 arch/x86/xen/mmu_pv.c trace_xen_mmu_flush_tlb_others(cpus, 
info->mm, info->start, info->end); start 1374 arch/x86/xen/mmu_pv.c (info->end - info->start) <= PAGE_SIZE) { start 1376 arch/x86/xen/mmu_pv.c args->op.arg1.linear_addr = info->start; start 457 arch/x86/xen/setup.c phys_addr_t start = 0; start 476 arch/x86/xen/setup.c unsigned long start_pfn = PFN_DOWN(start); start 485 arch/x86/xen/setup.c start = end; start 584 arch/x86/xen/setup.c static void __init xen_align_and_add_e820_region(phys_addr_t start, start 587 arch/x86/xen/setup.c phys_addr_t end = start + size; start 591 arch/x86/xen/setup.c start = PAGE_ALIGN(start); start 603 arch/x86/xen/setup.c e820__range_add(start, end - start, type); start 617 arch/x86/xen/setup.c bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) start 626 arch/x86/xen/setup.c end = start + size; start 630 arch/x86/xen/setup.c if (entry->type == E820_TYPE_RAM && entry->addr <= start && start 651 arch/x86/xen/setup.c phys_addr_t addr, start; start 657 arch/x86/xen/setup.c start = entry->addr; start 658 arch/x86/xen/setup.c for (addr = start; addr < start + size; addr += PAGE_SIZE) { start 661 arch/x86/xen/setup.c start = addr + PAGE_SIZE; start 662 arch/x86/xen/setup.c if (start + size > entry->addr + entry->size) start 665 arch/x86/xen/setup.c if (addr >= start + size) { start 666 arch/x86/xen/setup.c memblock_reserve(start, size); start 667 arch/x86/xen/setup.c return start; start 709 arch/x86/xen/setup.c phys_addr_t start, size; start 712 arch/x86/xen/setup.c start = __pa(xen_start_info->mfn_list); start 716 arch/x86/xen/setup.c start = PFN_PHYS(xen_start_info->first_p2m_pfn); start 720 arch/x86/xen/setup.c memblock_reserve(start, size); start 721 arch/x86/xen/setup.c if (!xen_is_e820_reserved(start, size)) start 733 arch/x86/xen/setup.c memblock_free(start, size); start 894 arch/x86/xen/setup.c phys_addr_t new_area, start, size; start 902 arch/x86/xen/setup.c start = boot_params.hdr.ramdisk_image; start 904 arch/x86/xen/setup.c xen_phys_memcpy(new_area, start, size); start 906 arch/x86/xen/setup.c start, start + size, new_area, new_area + size); start 907 arch/x86/xen/setup.c memblock_free(start, size); start 47 arch/x86/xen/xen-ops.h bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size); start 42 arch/xtensa/include/asm/bootparam.h unsigned long start; start 100 arch/xtensa/include/asm/cacheflush.h void flush_icache_range(unsigned long start, unsigned long end); start 119 arch/xtensa/include/asm/cacheflush.h #define flush_cache_vmap(start,end) flush_cache_all() start 120 arch/xtensa/include/asm/cacheflush.h #define flush_cache_vunmap(start,end) flush_cache_all() start 126 arch/xtensa/include/asm/cacheflush.h unsigned long start, unsigned long end); start 136 arch/xtensa/include/asm/cacheflush.h #define flush_cache_vmap(start,end) do { } while (0) start 137 arch/xtensa/include/asm/cacheflush.h #define flush_cache_vunmap(start,end) do { } while (0) start 144 arch/xtensa/include/asm/cacheflush.h #define flush_cache_range(vma, start, end) do { } while (0) start 149 arch/xtensa/include/asm/cacheflush.h #define local_flush_icache_range(start, end) \ start 151 arch/xtensa/include/asm/cacheflush.h __flush_dcache_range(start, (end) - (start)); \ start 152 arch/xtensa/include/asm/cacheflush.h __invalidate_icache_range(start,(end) - (start)); \ start 39 arch/xtensa/include/asm/delay.h unsigned long start = get_ccount(); start 43 arch/xtensa/include/asm/delay.h while (((unsigned long)get_ccount()) - start < cycles) start 24 arch/xtensa/include/asm/pci-bridge.h unsigned long start; start 
65 arch/xtensa/include/asm/pci-bridge.h unsigned long start, unsigned long end, int flags, char *name) start 67 arch/xtensa/include/asm/pci-bridge.h res->start = start; start 38 arch/xtensa/include/asm/tlbflush.h unsigned long start, unsigned long end); start 39 arch/xtensa/include/asm/tlbflush.h void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); start 48 arch/xtensa/include/asm/tlbflush.h void flush_tlb_kernel_range(unsigned long start, unsigned long end); start 57 arch/xtensa/include/asm/tlbflush.h #define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \ start 45 arch/xtensa/kernel/pci.c resource_size_t start = res->start; start 54 arch/xtensa/kernel/pci.c if (start & 0x300) start 55 arch/xtensa/kernel/pci.c start = (start + 0x3ff) & ~0x3ff; start 58 arch/xtensa/kernel/pci.c return start; start 85 arch/xtensa/kernel/pci.c vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT; start 399 arch/xtensa/kernel/perf_event.c .start = xtensa_pmu_start, start 108 arch/xtensa/kernel/setup.c return memblock_add(mi->start, mi->end - mi->start); start 119 arch/xtensa/kernel/setup.c initrd_start = (unsigned long)__va(mi->start); start 312 arch/xtensa/kernel/setup.c static inline int __init_memblock mem_reserve(unsigned long start, start 315 arch/xtensa/kernel/setup.c return memblock_reserve(start, end - start); start 729 arch/xtensa/kernel/setup.c .start = c_start, start 46 arch/xtensa/kernel/smp.c static void system_invalidate_dcache_range(unsigned long start, start 48 arch/xtensa/kernel/smp.c static void system_flush_invalidate_dcache_range(unsigned long start, start 513 arch/xtensa/kernel/smp.c unsigned long start, unsigned long end) start 517 arch/xtensa/kernel/smp.c .addr1 = start, start 529 arch/xtensa/kernel/smp.c void flush_tlb_kernel_range(unsigned long start, unsigned long end) start 532 arch/xtensa/kernel/smp.c .addr1 = start, start 574 arch/xtensa/kernel/smp.c unsigned long start, unsigned long end) start 578 arch/xtensa/kernel/smp.c .addr1 = start, start 590 arch/xtensa/kernel/smp.c void flush_icache_range(unsigned long start, unsigned long end) start 593 arch/xtensa/kernel/smp.c .addr1 = start, start 608 arch/xtensa/kernel/smp.c static void system_invalidate_dcache_range(unsigned long start, start 612 arch/xtensa/kernel/smp.c .addr1 = start, start 624 arch/xtensa/kernel/smp.c static void system_flush_invalidate_dcache_range(unsigned long start, start 628 arch/xtensa/kernel/smp.c .addr1 = start, start 181 arch/xtensa/mm/cache.c unsigned long start, unsigned long end) start 97 arch/xtensa/mm/init.c unsigned long start = memblock_region_memory_base_pfn(mem); start 108 arch/xtensa/mm/init.c if (start < max_low) start 109 arch/xtensa/mm/init.c start = max_low; start 118 arch/xtensa/mm/init.c if (res_end < start) start 120 arch/xtensa/mm/init.c if (res_start < start) start 121 arch/xtensa/mm/init.c res_start = start; start 126 arch/xtensa/mm/init.c if (res_start != start) start 127 arch/xtensa/mm/init.c free_area_high(start, res_start); start 128 arch/xtensa/mm/init.c start = res_end; start 129 arch/xtensa/mm/init.c if (start == end) start 134 arch/xtensa/mm/init.c if (start < end) start 135 arch/xtensa/mm/init.c free_area_high(start, end); start 38 arch/xtensa/mm/kasan_init.c static void __init populate(void *start, void *end) start 40 arch/xtensa/mm/kasan_init.c unsigned long n_pages = (end - start) / PAGE_SIZE; start 43 arch/xtensa/mm/kasan_init.c unsigned long vaddr = (unsigned long)start; start 52 arch/xtensa/mm/kasan_init.c 
pr_debug("%s: %p - %p\n", __func__, start, end); start 74 arch/xtensa/mm/kasan_init.c memset(start, 0, end - start); start 89 arch/xtensa/mm/tlb.c unsigned long start, unsigned long end) start 99 arch/xtensa/mm/tlb.c (unsigned long)mm->context.asid[cpu], start, end); start 102 arch/xtensa/mm/tlb.c if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { start 106 arch/xtensa/mm/tlb.c start &= PAGE_MASK; start 108 arch/xtensa/mm/tlb.c while(start < end) { start 109 arch/xtensa/mm/tlb.c invalidate_itlb_mapping(start); start 110 arch/xtensa/mm/tlb.c invalidate_dtlb_mapping(start); start 111 arch/xtensa/mm/tlb.c start += PAGE_SIZE; start 114 arch/xtensa/mm/tlb.c while(start < end) { start 115 arch/xtensa/mm/tlb.c invalidate_dtlb_mapping(start); start 116 arch/xtensa/mm/tlb.c start += PAGE_SIZE; start 150 arch/xtensa/mm/tlb.c void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start 152 arch/xtensa/mm/tlb.c if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET && start 153 arch/xtensa/mm/tlb.c end - start < _TLB_ENTRIES << PAGE_SHIFT) { start 154 arch/xtensa/mm/tlb.c start &= PAGE_MASK; start 155 arch/xtensa/mm/tlb.c while (start < end) { start 156 arch/xtensa/mm/tlb.c invalidate_itlb_mapping(start); start 157 arch/xtensa/mm/tlb.c invalidate_dtlb_mapping(start); start 158 arch/xtensa/mm/tlb.c start += PAGE_SIZE; start 125 arch/xtensa/platforms/xt2000/setup.c .start = SONIC83934_ADDR, start 130 arch/xtensa/platforms/xt2000/setup.c .start = SONIC83934_INTNUM, start 164 arch/xtensa/platforms/xtfpga/setup.c .start = OETH_REGS_PADDR, start 169 arch/xtensa/platforms/xtfpga/setup.c .start = OETH_SRAMBUFF_PADDR, start 174 arch/xtensa/platforms/xtfpga/setup.c .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), start 207 arch/xtensa/platforms/xtfpga/setup.c .start = C67X00_PADDR, start 212 arch/xtensa/platforms/xtfpga/setup.c .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), start 238 arch/xtensa/platforms/xtfpga/setup.c .start = DUART16552_PADDR, start 401 block/badblocks.c sector_t start = BB_OFFSET(p[lo]); start 403 block/badblocks.c p[lo] = BB_MAKE(start, s - start, ack); start 446 block/badblocks.c sector_t start = BB_OFFSET(p[i]); start 449 block/badblocks.c p[i] = BB_MAKE(start, len, 1); start 154 block/bfq-iosched.h u64 start, finish; start 117 block/bfq-wf2q.c !bfq_gt(new_entity->start, st->vtime) start 295 block/bfq-wf2q.c entity->finish = entity->start + start 304 block/bfq-wf2q.c entity->start, entity->finish, start 429 block/bfq-wf2q.c entity->min_start = entity->start; start 819 block/bfq-wf2q.c entity->start = new_st->vtime; start 956 block/bfq-wf2q.c entity->start += delta; start 995 block/bfq-wf2q.c entity->start = bfq_gt(min_vstart, entity->finish) ? 
start 1003 block/bfq-wf2q.c entity->start = min_vstart; start 1076 block/bfq-wf2q.c entity->start = entity->finish; start 1367 block/bfq-wf2q.c if (!bfq_gt(entry->start, vtime)) start 205 block/bio-integrity.c unsigned long start, end; start 245 block/bio-integrity.c start = ((unsigned long) buf) >> PAGE_SHIFT; start 246 block/bio-integrity.c nr_pages = end - start; start 526 block/bio.c void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) start 532 block/bio.c __bio_for_each_segment(bv, bio, iter, start) { start 1522 block/bio.c unsigned long start = kaddr >> PAGE_SHIFT; start 1523 block/bio.c const int nr_pages = end - start; start 1604 block/bio.c unsigned long start = kaddr >> PAGE_SHIFT; start 1612 block/bio.c if (end < start) start 1615 block/bio.c nr_pages = end - start; start 489 block/blk-iolatency.c u64 start = bio_issue_time(issue); start 498 block/blk-iolatency.c if (now <= start) start 501 block/blk-iolatency.c req_time = now - start; start 69 block/blk-mq-debugfs.c .start = queue_requeue_list_start, start 382 block/blk-mq-debugfs.c .start = hctx_dispatch_start, start 652 block/blk-mq-debugfs.c .start = ctx_##name##_rq_list_start, \ start 1034 block/blk-mq.c struct blk_mq_ctx *start) start 1036 block/blk-mq.c unsigned off = start ? start->index_hw[hctx->type] : 0; start 49 block/blk-mq.h struct blk_mq_ctx *start); start 498 block/blk-settings.c sector_t start) start 527 block/blk-settings.c alignment = queue_limit_alignment_offset(b, start); start 591 block/blk-settings.c alignment = queue_limit_discard_alignment(b, start); start 633 block/blk-settings.c sector_t start) start 637 block/blk-settings.c start += get_start_sect(bdev); start 639 block/blk-settings.c return blk_stack_limits(t, &bq->limits, start); start 760 block/blk-throttle.c bool rw, unsigned long start) start 771 block/blk-throttle.c if (time_after_eq(start, tg->slice_start[rw])) start 772 block/blk-throttle.c tg->slice_start[rw] = start; start 108 block/blk-zoned.c if (rep->start < offset) start 111 block/blk-zoned.c rep->start -= offset; start 112 block/blk-zoned.c if (rep->start + rep->len > bdev->bd_part->nr_sects) start 116 block/blk-zoned.c rep->wp = rep->start + rep->len; start 49 block/compat_ioctl.c u32 start; start 68 block/compat_ioctl.c geo.start = get_start_sect(bdev); start 74 block/compat_ioctl.c ret |= put_user(geo.start, &ugeo->start); start 1078 block/genhd.c .start = show_partition_start, start 1414 block/genhd.c .start = disk_seqf_start, start 22 block/ioctl.c long long start, length; start 39 block/ioctl.c start = p.start >> 9; start 44 block/ioctl.c long pstart = start, plength = length; start 45 block/ioctl.c if (pstart != start || plength != length start 50 block/ioctl.c if (p.start & (bdev_logical_block_size(bdev) - 1)) start 59 block/ioctl.c if (!(start + length <= part->start_sect || start 60 block/ioctl.c start >= part->start_sect + part->nr_sects)) { start 69 block/ioctl.c part = add_partition(disk, partno, start, length, start 101 block/ioctl.c start = p.start >> 9; start 107 block/ioctl.c long pstart = start, plength = length; start 108 block/ioctl.c if (pstart != start || plength != length start 122 block/ioctl.c if (start != part->start_sect) { start 134 block/ioctl.c !(start + length <= lpart->start_sect || start 135 block/ioctl.c start >= lpart->start_sect + lpart->nr_sects) start 205 block/ioctl.c uint64_t start, len; start 219 block/ioctl.c start = range[0]; start 222 block/ioctl.c if (start & 511) start 227 block/ioctl.c if (start + len > 
i_size_read(bdev->bd_inode)) start 229 block/ioctl.c truncate_inode_pages_range(mapping, start, start + len - 1); start 230 block/ioctl.c return blkdev_issue_discard(bdev, start >> 9, len >> 9, start 239 block/ioctl.c uint64_t start, end, len; start 247 block/ioctl.c start = range[0]; start 249 block/ioctl.c end = start + len - 1; start 251 block/ioctl.c if (start & 511) start 257 block/ioctl.c if (end < start) start 262 block/ioctl.c truncate_inode_pages_range(mapping, start, end); start 264 block/ioctl.c return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, start 476 block/ioctl.c geo.start = get_start_sect(bdev); start 935 block/kyber-iosched.c .start = kyber_##name##_rqs_start, \ start 687 block/mq-deadline.c .start = deadline_##name##_fifo_start, \ start 754 block/mq-deadline.c .start = deadline_dispatch_start, start 184 block/partition-generic.c static DEVICE_ATTR(start, 0444, part_start_show, NULL); start 310 block/partition-generic.c sector_t start, sector_t len, int flags, start 341 block/partition-generic.c p->start_sect = start; start 343 block/partition-generic.c queue_limit_alignment_offset(&disk->queue->limits, start); start 345 block/partition-generic.c queue_limit_discard_alignment(&disk->queue->limits, start); start 56 block/partitions/acorn.c __le32 start; start 96 block/partitions/acorn.c le32_to_cpu(rr->part[part].start), start 305 block/partitions/acorn.c __le32 start; start 373 block/partitions/acorn.c u32 start = le32_to_cpu(p->start); start 393 block/partitions/acorn.c if (size > 1 && adfspart_check_ICSLinux(state, start)) { start 394 block/partitions/acorn.c start += 1; start 400 block/partitions/acorn.c put_partition(state, slot++, start, size); start 413 block/partitions/acorn.c __le32 start; start 467 block/partitions/acorn.c u32 start = le32_to_cpu(p->start); start 471 block/partitions/acorn.c put_partition(state, slot++, start, size); start 484 block/partitions/acorn.c __le32 start; start 514 block/partitions/acorn.c sector_t start = 0; start 535 block/partitions/acorn.c next = le32_to_cpu(p->start); start 537 block/partitions/acorn.c put_partition(state, slot++, start, next - start); start 538 block/partitions/acorn.c start = next; start 545 block/partitions/acorn.c put_partition(state, slot++, start, size - start); start 697 block/partitions/efi.c u64 start = le64_to_cpu(ptes[i].starting_lba); start 704 block/partitions/efi.c put_partition(state, i+1, start * ssz, size * ssz); start 598 block/partitions/ldm.c part->start, part->size); start 981 block/partitions/ldm.c part->start = get_unaligned_be64(buffer + 0x24 + r_name); start 1204 block/partitions/ldm.c (v->vblk.part.start > vb->vblk.part.start)) { start 146 block/partitions/ldm.h u64 start; start 534 block/partitions/msdos.c sector_t start = start_sect(p)*sector_size; start 549 block/partitions/msdos.c put_partition(state, slot, start, n); start 552 block/partitions/msdos.c parse_extended(state, start, size, disksig); start 556 block/partitions/msdos.c put_partition(state, slot, start, size); start 36 block/partitions/sgi.c unsigned int start, blocks; start 72 block/partitions/sgi.c start = be32_to_cpu(p->first_block); start 74 block/partitions/sgi.c put_partition(state, slot, start, blocks); start 633 block/sed-opal.c u8 *start; start 635 block/sed-opal.c start = add_bytestring_header(err, cmd, len); start 636 block/sed-opal.c if (!start) start 638 block/sed-opal.c memcpy(start, bytestring, len); start 62 crypto/ablkcipher.c static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int 
len) start 64 crypto/ablkcipher.c u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); start 66 crypto/ablkcipher.c return max(start, end_page); start 112 crypto/async_tx/async_pq.c int start = -1, stop = disks - 3; start 127 crypto/async_tx/async_pq.c if (start == -1) start 128 crypto/async_tx/async_pq.c start = i; start 134 crypto/async_tx/async_pq.c if (start >= 0) start 135 crypto/async_tx/async_pq.c raid6_call.xor_syndrome(disks, start, stop, len, srcs); start 62 crypto/blkcipher.c static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) start 64 crypto/blkcipher.c u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); start 65 crypto/blkcipher.c return max(start, end_page); start 1883 crypto/drbg.c size_t start = 0; start 1889 crypto/drbg.c start = 10; start 1892 crypto/drbg.c start = 8; start 1898 crypto/drbg.c len = strlen(cra_driver_name) - start; start 1900 crypto/drbg.c if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name, start 86 crypto/proc.c .start = c_start, start 56 crypto/scatterwalk.c unsigned int start, unsigned int nbytes, int out) start 64 crypto/scatterwalk.c sg = scatterwalk_ffwd(tmp, sg, start); start 86 crypto/skcipher.c static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) start 88 crypto/skcipher.c u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); start 90 crypto/skcipher.c return max(start, end_page); start 185 crypto/tcrypt.c unsigned long start, end; start 194 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 230 crypto/tcrypt.c cycles_t start, end; start 232 crypto/tcrypt.c start = get_cycles(); start 239 crypto/tcrypt.c cycles += end - start; start 456 crypto/tcrypt.c unsigned long start, end; start 460 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 495 crypto/tcrypt.c cycles_t start, end; start 497 crypto/tcrypt.c start = get_cycles(); start 507 crypto/tcrypt.c cycles += end - start; start 746 crypto/tcrypt.c unsigned long start, end; start 755 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 791 crypto/tcrypt.c cycles_t start, end; start 793 crypto/tcrypt.c start = get_cycles(); start 800 crypto/tcrypt.c cycles += end - start; start 910 crypto/tcrypt.c unsigned long start, end; start 914 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 930 crypto/tcrypt.c unsigned long start, end; start 937 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 974 crypto/tcrypt.c cycles_t start, end; start 976 crypto/tcrypt.c start = get_cycles(); start 984 crypto/tcrypt.c cycles += end - start; start 1023 crypto/tcrypt.c cycles_t start, end; start 1025 crypto/tcrypt.c start = get_cycles(); start 1041 crypto/tcrypt.c cycles += end - start; start 1183 crypto/tcrypt.c unsigned long start, end; start 1192 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 1228 crypto/tcrypt.c cycles_t start, end; start 1230 crypto/tcrypt.c start = get_cycles(); start 1237 crypto/tcrypt.c cycles += end - start; start 1420 crypto/tcrypt.c unsigned long start, end; start 1424 crypto/tcrypt.c for (start = jiffies, end = start + secs * HZ, bcount = 0; start 1464 crypto/tcrypt.c cycles_t start, end; start 1466 crypto/tcrypt.c start = get_cycles(); start 1478 crypto/tcrypt.c cycles += end - start; start 5183 crypto/testmgr.c int start = 0; start 5186 crypto/testmgr.c while (start < end) { start 5187 crypto/testmgr.c int i = (start + 
end) / 2; start 5196 crypto/testmgr.c start = i + 1; start 84 drivers/acpi/acpi_amba.c dev->irq[irq_no++] = rentry->res->start; start 102 drivers/acpi/acpi_apd.c clk_data->base = devm_ioremap(&adev->dev, rentry->res->start, start 663 drivers/acpi/acpi_lpss.c pdata->mmio_base = ioremap(rentry->res->start, start 138 drivers/acpi/acpi_watchdog.c res.start = gas->address; start 139 drivers/acpi/acpi_watchdog.c res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; start 154 drivers/acpi/acpi_watchdog.c if (res.start < rentry->res->start) start 155 drivers/acpi/acpi_watchdog.c rentry->res->start = res.start; start 1053 drivers/acpi/acpica/aclocal.h u16 start; start 457 drivers/acpi/acpica/dbinput.c char *start; start 483 drivers/acpi/acpica/dbinput.c start = string; start 498 drivers/acpi/acpica/dbinput.c start = string; start 514 drivers/acpi/acpica/dbinput.c start = string; start 548 drivers/acpi/acpica/dbinput.c start = string; start 566 drivers/acpi/acpica/dbinput.c return (start); start 336 drivers/acpi/acpica/exdump.c union acpi_operand_object *start; start 439 drivers/acpi/acpica/exdump.c start = *ACPI_CAST_PTR(void *, target); start 440 drivers/acpi/acpica/exdump.c next = start; start 460 drivers/acpi/acpica/exdump.c if ((next == start) || (next == data)) { start 475 drivers/acpi/acpica/exdump.c start = *ACPI_CAST_PTR(void *, target); start 476 drivers/acpi/acpica/exdump.c next = start; start 498 drivers/acpi/acpica/exdump.c if ((next == start) || (next == data)) { start 511 drivers/acpi/acpica/exdump.c start = *ACPI_CAST_PTR(void *, target); start 512 drivers/acpi/acpica/exdump.c next = start; start 532 drivers/acpi/acpica/exdump.c if ((next == start) || (next == data)) { start 144 drivers/acpi/acpica/hwvalid.c && (last_address >= port_info->start)) { start 153 drivers/acpi/acpica/hwvalid.c port_info->start, start 96 drivers/acpi/acpica/psargs.c u8 *start = parser_state->aml; start 105 drivers/acpi/acpica/psargs.c return_PTR(start + package_length); /* end of package */ start 125 drivers/acpi/acpica/psargs.c u8 *start = parser_state->aml; start 143 drivers/acpi/acpica/psargs.c if (end == start) { start 144 drivers/acpi/acpica/psargs.c start = NULL; start 172 drivers/acpi/acpica/psargs.c return_PTR((char *)start); start 206 drivers/acpi/acpica/psargs.c u8 *start = parser_state->aml; start 249 drivers/acpi/acpica/psargs.c walk_state->parser_state.aml = start; start 261 drivers/acpi/acpica/psargs.c name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start); start 285 drivers/acpi/apei/apei-base.c unsigned long start; start 296 drivers/acpi/apei/apei-base.c unsigned long start, unsigned long size) start 299 drivers/acpi/apei/apei-base.c unsigned long end = start + size; start 301 drivers/acpi/apei/apei-base.c if (end <= start) start 305 drivers/acpi/apei/apei-base.c if (res->start > end || res->end < start) start 307 drivers/acpi/apei/apei-base.c else if (end <= res->end && start >= res->start) { start 312 drivers/acpi/apei/apei-base.c res->start = start = min(res->start, start); start 325 drivers/acpi/apei/apei-base.c res_ins->start = start; start 341 drivers/acpi/apei/apei-base.c if (res1->start >= res2->end || start 342 drivers/acpi/apei/apei-base.c res1->end <= res2->start) start 345 drivers/acpi/apei/apei-base.c res1->start >= res2->start) { start 350 drivers/acpi/apei/apei-base.c res1->start < res2->start) { start 354 drivers/acpi/apei/apei-base.c res->start = res2->end; start 356 drivers/acpi/apei/apei-base.c res1->end = res2->start; start 360 drivers/acpi/apei/apei-base.c 
if (res1->start < res2->start) start 361 drivers/acpi/apei/apei-base.c res1->end = res2->start; start 363 drivers/acpi/apei/apei-base.c res1->start = res2->end; start 397 drivers/acpi/apei/apei-base.c rc = apei_res_add(&resources1->iomem, res->start, start 398 drivers/acpi/apei/apei-base.c res->end - res->start); start 403 drivers/acpi/apei/apei-base.c rc = apei_res_add(&resources1->ioport, res->start, start 404 drivers/acpi/apei/apei-base.c res->end - res->start); start 413 drivers/acpi/apei/apei-base.c unsigned long start, unsigned long size, start 417 drivers/acpi/apei/apei-base.c return apei_res_add(&resources->iomem, start, size); start 419 drivers/acpi/apei/apei-base.c return apei_res_add(&resources->ioport, start, size); start 440 drivers/acpi/apei/apei-base.c static int apei_get_res_callback(__u64 start, __u64 size, void *data) start 443 drivers/acpi/apei/apei-base.c return apei_res_add(&resources->iomem, start, size); start 451 drivers/acpi/apei/apei-base.c int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size, start 501 drivers/acpi/apei/apei-base.c r = request_mem_region(res->start, res->end - res->start, start 506 drivers/acpi/apei/apei-base.c (unsigned long long)res->start, start 514 drivers/acpi/apei/apei-base.c r = request_region(res->start, res->end - res->start, desc); start 518 drivers/acpi/apei/apei-base.c (unsigned long long)res->start, start 537 drivers/acpi/apei/apei-base.c release_region(res->start, res->end - res->start); start 544 drivers/acpi/apei/apei-base.c release_mem_region(res->start, res->end - res->start); start 561 drivers/acpi/apei/apei-base.c release_mem_region(res->start, res->end - res->start); start 563 drivers/acpi/apei/apei-base.c release_region(res->start, res->end - res->start); start 111 drivers/acpi/apei/apei-internal.h unsigned long start, unsigned long size, start 1125 drivers/acpi/arm64/iort.c res->start = irq; start 1194 drivers/acpi/arm64/iort.c res[num_res].start = smmu->base_address; start 1303 drivers/acpi/arm64/iort.c res[num_res].start = smmu->base_address; start 1368 drivers/acpi/arm64/iort.c res[0].start = pmcg->page0_base_address; start 1371 drivers/acpi/arm64/iort.c res[1].start = pmcg->page1_base_address; start 100 drivers/acpi/evged.c irq = r.start; start 131 drivers/acpi/internal.h int acpi_extract_power_resources(union acpi_object *package, unsigned int start, start 167 drivers/acpi/ioapic.c if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) { start 275 drivers/acpi/irq.c res->start = rc; start 1146 drivers/acpi/nfit/core.c res->start = flush->hint_address[i]; start 1147 drivers/acpi/nfit/core.c res->end = res->start + 8 - 1; start 2826 drivers/acpi/nfit/core.c is_pmem = region_intersects(nd_res->start, resource_size(nd_res), start 2836 drivers/acpi/nfit/core.c res->start = nd_res->start; start 2876 drivers/acpi/nfit/core.c mapping->start = memdev->address; start 2888 drivers/acpi/nfit/core.c mapping->start = nfit_mem->bdw->start_address; start 2946 drivers/acpi/nfit/core.c res.start = spa->address; start 2947 drivers/acpi/nfit/core.c res.end = res.start + spa->length - 1; start 249 drivers/acpi/numa.c u64 start, end; start 266 drivers/acpi/numa.c start = ma->base_address; start 267 drivers/acpi/numa.c end = start + ma->length; start 278 drivers/acpi/numa.c if (numa_add_memblk(node, start, end) < 0) { start 280 drivers/acpi/numa.c node, (unsigned long long) start, start 289 drivers/acpi/numa.c (unsigned long long) start, (unsigned long long) end - 1, start 294 drivers/acpi/numa.c if (hotpluggable && 
memblock_mark_hotplug(start, ma->length)) start 296 drivers/acpi/numa.c (unsigned long long)start, (unsigned long long)end - 1); start 28 drivers/acpi/nvs.c static int suspend_nvs_register(unsigned long start, unsigned long size); start 36 drivers/acpi/nvs.c int acpi_nvs_register(__u64 start, __u64 size) start 43 drivers/acpi/nvs.c region->phys_start = start; start 47 drivers/acpi/nvs.c return suspend_nvs_register(start, size); start 50 drivers/acpi/nvs.c int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data), start 93 drivers/acpi/nvs.c static int suspend_nvs_register(unsigned long start, unsigned long size) start 98 drivers/acpi/nvs.c start, start + size - 1, size); start 108 drivers/acpi/nvs.c entry->phys_start = start; start 109 drivers/acpi/nvs.c nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); start 112 drivers/acpi/nvs.c start += entry->size; start 1474 drivers/acpi/osl.c clash = acpi_check_address_range(space_id, res->start, length, warn); start 1493 drivers/acpi/osl.c int acpi_check_region(resource_size_t start, resource_size_t n, start 1497 drivers/acpi/osl.c .start = start, start 1498 drivers/acpi/osl.c .end = start + n - 1, start 1537 drivers/acpi/osl.c if (!(mem_ctx[0]->address >= res->start && start 36 drivers/acpi/pci_mcfg.c #define MCFG_BUS_RANGE(start, end) DEFINE_RES_NAMED((start), \ start 37 drivers/acpi/pci_mcfg.c ((end) - (start) + 1), \ start 178 drivers/acpi/pci_mcfg.c if (f->cfgres.start) start 210 drivers/acpi/pci_mcfg.c if (e->segment == seg && e->bus_start <= bus_res->start && start 220 drivers/acpi/pci_mcfg.c res.start = root->mcfg_addr + (bus_res->start << 20); start 221 drivers/acpi/pci_mcfg.c res.end = res.start + (resource_size(bus_res) << 20) - 1; start 232 drivers/acpi/pci_mcfg.c if (!res.start) start 100 drivers/acpi/pci_root.c res->start = address.address.minimum; start 112 drivers/acpi/pci_root.c res->start = -1; start 118 drivers/acpi/pci_root.c if (res->start == -1) start 562 drivers/acpi/pci_root.c root->secondary.start = bus; start 564 drivers/acpi/pci_root.c root->secondary.start = 0; start 607 drivers/acpi/pci_root.c root->segment, (unsigned int)root->secondary.start); start 697 drivers/acpi/pci_root.c if (end <= res1->start) { start 720 drivers/acpi/pci_root.c res2->start = min(res1->start, res2->start); start 743 drivers/acpi/pci_root.c resource_size_t cpu_addr = res->start; start 755 drivers/acpi/pci_root.c res->start = port; start 878 drivers/acpi/pci_root.c int ret, busnum = root->secondary.start; start 128 drivers/acpi/power.c unsigned int start, unsigned int i) start 135 drivers/acpi/power.c for (j = start; j < i; j++) { start 144 drivers/acpi/power.c int acpi_extract_power_resources(union acpi_object *package, unsigned int start, start 150 drivers/acpi/power.c for (i = start; i < package->package.count; i++) { start 165 drivers/acpi/power.c if (acpi_power_resource_is_dup(package, start, i)) start 51 drivers/acpi/resource.c static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io) start 53 drivers/acpi/resource.c u64 reslen = end - start + 1; start 62 drivers/acpi/resource.c if (len && reslen && start <= end) start 66 drivers/acpi/resource.c io ? 
"io" : "mem", start, end, len); start 76 drivers/acpi/resource.c if (!acpi_dev_resource_len_valid(res->start, res->end, len, false)) start 83 drivers/acpi/resource.c static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, start 86 drivers/acpi/resource.c res->start = start; start 87 drivers/acpi/resource.c res->end = start + len - 1; start 144 drivers/acpi/resource.c if (!acpi_dev_resource_len_valid(res->start, res->end, len, true)) start 156 drivers/acpi/resource.c static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, start 159 drivers/acpi/resource.c res->start = start; start 160 drivers/acpi/resource.c res->end = start + len - 1; start 212 drivers/acpi/resource.c u64 start, end, offset = 0; start 236 drivers/acpi/resource.c start = attr->minimum + offset; start 240 drivers/acpi/resource.c res->start = start; start 243 drivers/acpi/resource.c (offset != win->offset || start != res->start || end != res->end)) { start 385 drivers/acpi/resource.c res->start = gsi; start 426 drivers/acpi/resource.c res->start = irq; start 1434 drivers/acpi/scan.c if (rentry->res->start < dma_start) start 1435 drivers/acpi/scan.c dma_start = rentry->res->start; start 1686 drivers/acpi/scan.c if (ACPI_FAILURE(status) || res.start != spcr_uart_addr) start 1690 drivers/acpi/scan.c &res.start); start 144 drivers/amba/bus.c (unsigned long long)dev->res.start, (unsigned long long)dev->res.end, start 390 drivers/amba/bus.c tmp = ioremap(dev->res.start, size); start 668 drivers/amba/bus.c dev->res.start = base; start 795 drivers/amba/bus.c if (!request_mem_region(dev->res.start, size, name)) start 812 drivers/amba/bus.c release_mem_region(dev->res.start, size); start 254 drivers/amba/tegra-ahb.c (res->start & INCORRECT_BASE_ADDR_LOW_BYTE) == start 257 drivers/amba/tegra-ahb.c res->start -= INCORRECT_BASE_ADDR_LOW_BYTE; start 182 drivers/android/binder_alloc.c void __user *start, void __user *end) start 193 drivers/android/binder_alloc.c allocate ? 
"allocate" : "free", start, end); start 195 drivers/android/binder_alloc.c if (end <= start) start 198 drivers/android/binder_alloc.c trace_binder_update_page_range(alloc, allocate, start, end); start 203 drivers/android/binder_alloc.c for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { start 226 drivers/android/binder_alloc.c for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { start 293 drivers/android/binder_alloc.c if (page_addr == start) start 302 drivers/android/binder_alloc.c if (page_addr == start) start 287 drivers/android/binder_trace.h void __user *start, void __user *end), start 288 drivers/android/binder_trace.h TP_ARGS(alloc, allocate, start, end), start 298 drivers/android/binder_trace.h __entry->offset = start - alloc->buffer; start 299 drivers/android/binder_trace.h __entry->size = end - start; start 220 drivers/ata/ahci_da850.c pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res)); start 992 drivers/ata/ahci_imx.c imxpriv->phy_base = devm_ioremap(dev, phy_res->start, start 3689 drivers/ata/libata-core.c unsigned long start = jiffies; start 3695 drivers/ata/libata-core.c nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); start 3697 drivers/ata/libata-core.c nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); start 3741 drivers/ata/libata-core.c if (!warned && time_after(now, start + 5 * HZ) && start 6954 drivers/ata/libata-core.c char *start = *cur, *p = *cur; start 6971 drivers/ata/libata-core.c p = strchr(start, ':'); start 6973 drivers/ata/libata-core.c val = strstrip(start); start 6978 drivers/ata/libata-core.c id = strstrip(start); start 275 drivers/ata/libata-eh.c unsigned long long start, len; start 282 drivers/ata/libata-eh.c start = (unsigned long long)pci_resource_start(pdev, bar); start 286 drivers/ata/libata-eh.c ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); start 289 drivers/ata/libata-eh.c start + (unsigned long long)offset); start 3667 drivers/ata/libata-scsi.c u64 size, start, wp; start 3676 drivers/ata/libata-scsi.c start = get_unaligned_le64(&rec[16]); start 3681 drivers/ata/libata-scsi.c put_unaligned_be64(start, &rec[16]); start 805 drivers/ata/pata_arasan_cf.c if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), start 827 drivers/ata/pata_arasan_cf.c acdev->pbase = res->start; start 828 drivers/ata/pata_arasan_cf.c acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, start 890 drivers/ata/pata_arasan_cf.c (unsigned long long) res->start, acdev->vbase); start 277 drivers/ata/pata_atp867x.c unsigned long start, len; start 281 drivers/ata/pata_atp867x.c start = pci_resource_start(pdev, i); start 284 drivers/ata/pata_atp867x.c start, len); start 166 drivers/ata/pata_buddha.c board = z->resource.start; start 230 drivers/ata/pata_ep93xx.c unsigned long start = (1250 + 35) / 25 - t2; start 231 drivers/ata/pata_ep93xx.c unsigned long counter = start; start 235 drivers/ata/pata_ep93xx.c return start - counter; start 950 drivers/ata/pata_ep93xx.c drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN; start 951 drivers/ata/pata_ep93xx.c drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT; start 501 drivers/ata/pata_ftide010.c (res->start == 0x63400000)); start 146 drivers/ata/pata_gayle.c if (!devm_request_mem_region(&pdev->dev, res->start, start 182 drivers/ata/pata_imx.c (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA, start 183 drivers/ata/pata_imx.c (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL); start 161 drivers/ata/pata_ixp4xx_cf.c 
data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000); start 162 drivers/ata/pata_ixp4xx_cf.c data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000); start 181 drivers/ata/pata_ixp4xx_cf.c ixp4xx_setup_port(ap, data, cs0->start, cs1->start); start 699 drivers/ata/pata_mpc52xx.c if (!devm_request_mem_region(&op->dev, res_mem.start, start 705 drivers/ata/pata_mpc52xx.c ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs)); start 747 drivers/ata/pata_mpc52xx.c priv->ata_regs_pa = res_mem.start; start 785 drivers/ata/pata_mpc52xx.c rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start, start 894 drivers/ata/pata_octeon_cf.c cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start, start 912 drivers/ata/pata_octeon_cf.c cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, start 928 drivers/ata/pata_octeon_cf.c cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, start 211 drivers/ata/pata_pcmcia.c io_base = pdev->resource[0]->start; start 213 drivers/ata/pata_pcmcia.c ctl_base = pdev->resource[1]->start; start 215 drivers/ata/pata_pcmcia.c ctl_base = pdev->resource[0]->start + 0x0e; start 115 drivers/ata/pata_platform.c if (irq_res && irq_res->start > 0) { start 116 drivers/ata/pata_platform.c irq = irq_res->start; start 152 drivers/ata/pata_platform.c ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start, start 154 drivers/ata/pata_platform.c ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start, start 157 drivers/ata/pata_platform.c ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start, start 159 drivers/ata/pata_platform.c ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start, start 172 drivers/ata/pata_platform.c (unsigned long long)io_res->start, start 173 drivers/ata/pata_platform.c (unsigned long long)ctl_res->start); start 222 drivers/ata/pata_pxa.c ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, start 224 drivers/ata/pata_pxa.c ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, start 226 drivers/ata/pata_pxa.c ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, start 267 drivers/ata/pata_pxa.c config.src_addr = dma_res->start; start 268 drivers/ata/pata_pxa.c config.dst_addr = dma_res->start; start 288 drivers/ata/pata_pxa.c ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt, start 143 drivers/ata/pata_rb532_cf.c info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, start 571 drivers/ata/pata_samsung_cf.c (unsigned long long)res->start); start 1229 drivers/ata/sata_dwc_460ex.c hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr); start 491 drivers/ata/sata_highbank.c hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); start 1249 drivers/ata/sata_mv.c static void mv_dump_mem(void __iomem *start, unsigned bytes) start 1253 drivers/ata/sata_mv.c DPRINTK("%p: ", start + b); start 1255 drivers/ata/sata_mv.c printk("%08x ", readl(start + b)); start 4118 drivers/ata/sata_mv.c hpriv->base = devm_ioremap(&pdev->dev, res->start, start 406 drivers/atm/ambassador.c static int check_area (void * start, size_t length) { start 411 drivers/atm/ambassador.c u32 startaddress = virt_to_bus (start); start 564 drivers/atm/ambassador.c ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit); start 598 drivers/atm/ambassador.c ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit); start 628 drivers/atm/ambassador.c txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit); start 659 drivers/atm/ambassador.c txq->out.ptr = NEXTQ (txq->out.ptr, 
txq->out.start, txq->out.limit); start 685 drivers/atm/ambassador.c rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit); start 713 drivers/atm/ambassador.c rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit); start 1536 drivers/atm/ambassador.c cq->ptrs.start = cmd; start 1556 drivers/atm/ambassador.c txq->in.start = in; start 1563 drivers/atm/ambassador.c txq->out.start = out; start 1585 drivers/atm/ambassador.c rxq->in.start = in; start 1592 drivers/atm/ambassador.c rxq->out.start = out; start 1613 drivers/atm/ambassador.c void * memory = dev->cq.ptrs.start; start 1836 drivers/atm/ambassador.c lb->payload.start = cpu_to_be32 (address); start 1979 drivers/atm/ambassador.c a.command_start = bus_addr (dev->cq.ptrs.start); start 1981 drivers/atm/ambassador.c a.tx_start = bus_addr (dev->txq.in.start); start 1983 drivers/atm/ambassador.c a.txcom_start = bus_addr (dev->txq.out.start); start 1988 drivers/atm/ambassador.c a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start); start 1990 drivers/atm/ambassador.c a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start); start 340 drivers/atm/ambassador.h __be32 start; start 525 drivers/atm/ambassador.h #define NEXTQ(current,start,limit) \ start 526 drivers/atm/ambassador.h ( (current)+1 < (limit) ? (current)+1 : (start) ) start 529 drivers/atm/ambassador.h command * start; start 551 drivers/atm/ambassador.h tx_in * start; start 556 drivers/atm/ambassador.h tx_out * start; start 569 drivers/atm/ambassador.h rx_in * start; start 574 drivers/atm/ambassador.h rx_out * start; start 173 drivers/atm/eni.c eni_dev->free_list[i].start, start 202 drivers/atm/eni.c static void eni_put_free(struct eni_dev *eni_dev, void __iomem *start, start 208 drivers/atm/eni.c DPRINTK("init 0x%lx+%ld(0x%lx)\n",start,size,size); start 209 drivers/atm/eni.c start += eni_dev->base_diff; start 215 drivers/atm/eni.c start,size); start 218 drivers/atm/eni.c for (order = 0; !(((unsigned long)start | size) & (1 << order)); order++); start 224 drivers/atm/eni.c list[len].start = (void __iomem *) start; start 227 drivers/atm/eni.c start += 1 << order; start 238 drivers/atm/eni.c void __iomem *start; start 261 drivers/atm/eni.c start = list[index].start-eni_dev->base_diff; start 265 drivers/atm/eni.c eni_put_free(eni_dev,start+*size,(1 << best_order)-*size); start 266 drivers/atm/eni.c DPRINTK("%ld bytes (order %d) at 0x%lx\n",*size,order,start); start 267 drivers/atm/eni.c memset_io(start,0,*size); /* never leak data */ start 269 drivers/atm/eni.c return start; start 273 drivers/atm/eni.c static void eni_free_mem(struct eni_dev *eni_dev, void __iomem *start, start 279 drivers/atm/eni.c start += eni_dev->base_diff; start 283 drivers/atm/eni.c DPRINTK("eni_free_mem: %p+0x%lx (order %d)\n",start,size,order); start 285 drivers/atm/eni.c if (((unsigned long) list[i].start) == ((unsigned long)start^(1 << order)) && start 288 drivers/atm/eni.c list[i].start,start,1 << order,list[i].order,order); start 290 drivers/atm/eni.c start = (void __iomem *) ((unsigned long) start & ~(unsigned long) (1 << order)); start 296 drivers/atm/eni.c printk(KERN_ALERT "eni_free_mem overflow (%p,%d)\n",start, start 300 drivers/atm/eni.c list[len].start = start; start 1876 drivers/atm/eni.c error = dev->phy->start(dev); start 2213 drivers/atm/eni.c fe->start-offset,fe->start-offset+(1 << fe->order)-1, start 37 drivers/atm/eni.h void __iomem *start; /* counting in bytes */ start 2569 drivers/atm/fore200e.c fore200e->phys_base = op->resource[0].start; start 1480 
drivers/atm/he.c if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start) start 1481 drivers/atm/he.c he_dev->atm_dev->phy->start(he_dev->atm_dev); start 351 drivers/atm/idt77105.c .start = idt77105_start, start 3693 drivers/atm/idt77252.c if (dev->phy->start) start 3694 drivers/atm/idt77252.c dev->phy->start(dev); start 1198 drivers/atm/iphase.c wr_ptr = iadev->rx_dle_q.start; start 1290 drivers/atm/iphase.c cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4)); start 1356 drivers/atm/iphase.c dle = iadev->rx_dle_q.start; start 1441 drivers/atm/iphase.c iadev->rx_dle_q.start = (struct dle *)dle_addr; start 1442 drivers/atm/iphase.c iadev->rx_dle_q.read = iadev->rx_dle_q.start; start 1443 drivers/atm/iphase.c iadev->rx_dle_q.write = iadev->rx_dle_q.start; start 1636 drivers/atm/iphase.c dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start, start 1698 drivers/atm/iphase.c cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4)); start 1706 drivers/atm/iphase.c if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) { start 1740 drivers/atm/iphase.c dle = iadev->tx_dle_q.start; start 1928 drivers/atm/iphase.c iadev->tx_dle_q.start = (struct dle*)dle_addr; start 1929 drivers/atm/iphase.c iadev->tx_dle_q.read = iadev->tx_dle_q.start; start 1930 drivers/atm/iphase.c iadev->tx_dle_q.write = iadev->tx_dle_q.start; start 2216 drivers/atm/iphase.c dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start, start 2493 drivers/atm/iphase.c dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start, start 2500 drivers/atm/iphase.c dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start, start 2586 drivers/atm/iphase.c if (dev->phy->start) { start 2587 drivers/atm/iphase.c error = dev->phy->start(dev); start 3034 drivers/atm/iphase.c wr_ptr = iadev->tx_dle_q.start; start 3047 drivers/atm/iphase.c wr_ptr = iadev->tx_dle_q.start; start 292 drivers/atm/iphase.h struct dle *start; start 196 drivers/atm/lanai.c u32 *start; /* From get_free_pages */ start 345 drivers/atm/lanai.c buf->start = dma_alloc_coherent(&pci->dev, start 347 drivers/atm/lanai.c if (buf->start != NULL) { /* Success */ start 352 drivers/atm/lanai.c buf->ptr = buf->start; start 354 drivers/atm/lanai.c (&((unsigned char *) buf->start)[size]); start 355 drivers/atm/lanai.c memset(buf->start, 0, size); start 365 drivers/atm/lanai.c return ((unsigned long) buf->end) - ((unsigned long) buf->start); start 371 drivers/atm/lanai.c if (buf->start != NULL) { start 373 drivers/atm/lanai.c buf->start, buf->dmaaddr); start 374 drivers/atm/lanai.c buf->start = buf->end = buf->ptr = NULL; start 823 drivers/atm/lanai.c return (lanai->aal0buf.start == NULL) ? 
-ENOMEM : 0; start 1139 drivers/atm/lanai.c ((unsigned long) lvcc->tx.buf.start); start 1167 drivers/atm/lanai.c (unsigned char *) lvcc->tx.buf.start; start 1171 drivers/atm/lanai.c lvcc->tx.buf.start, lvcc->tx.buf.ptr, lvcc->tx.buf.end); start 1176 drivers/atm/lanai.c lvcc->tx.buf.start, lvcc->tx.buf.ptr, lvcc->tx.buf.end); start 1182 drivers/atm/lanai.c lvcc->tx.buf.ptr = lvcc->tx.buf.start; start 1194 drivers/atm/lanai.c lvcc->tx.buf.ptr = lvcc->tx.buf.start; start 1208 drivers/atm/lanai.c memcpy(lvcc->tx.buf.start, src + n - m, m); start 1209 drivers/atm/lanai.c e = ((unsigned char *) lvcc->tx.buf.start) + m; start 1226 drivers/atm/lanai.c memset(lvcc->tx.buf.start, 0, m); start 1227 drivers/atm/lanai.c e = ((unsigned char *) lvcc->tx.buf.start) + m; start 1237 drivers/atm/lanai.c (unsigned char *) lvcc->tx.buf.start; start 1240 drivers/atm/lanai.c ptr, lvcc->vci, lvcc->tx.buf.start, lvcc->tx.buf.ptr, start 1372 drivers/atm/lanai.c memcpy(dest + n - m, lvcc->rx.buf.start, m); start 1383 drivers/atm/lanai.c u32 *end = &lvcc->rx.buf.start[endptr * 4]; start 1391 drivers/atm/lanai.c if ((x = &end[-2]) < lvcc->rx.buf.start) start 1488 drivers/atm/lanai.c if (unlikely(buf->start == NULL)) start 1583 drivers/atm/lanai.c if (unlikely(lanai->service.start == NULL)) start 1586 drivers/atm/lanai.c lanai->service.start, start 1674 drivers/atm/lanai.c ((unsigned long) lvcc->rx.buf.start)) + 47; start 1692 drivers/atm/lanai.c lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; start 1714 drivers/atm/lanai.c const u32 *end = lanai->service.start + wreg; start 1719 drivers/atm/lanai.c lanai->service.ptr = lanai->service.start; start 2136 drivers/atm/lanai.c raw_base = lanai->pci->resource[0].start; start 801 drivers/atm/nicstar.c if (card->atmdev->phy && card->atmdev->phy->start) start 802 drivers/atm/nicstar.c card->atmdev->phy->start(card->atmdev); start 360 drivers/atm/suni.c .start = suni_start, start 242 drivers/atm/uPD98402.c .start = uPD98402_start, start 1347 drivers/atm/zatm.c error = dev->phy->start(dev); start 284 drivers/auxdisplay/arm-charlcd.c lcd->phybase = res->start; start 291 drivers/base/bus.c int bus_for_each_dev(struct bus_type *bus, struct device *start, start 302 drivers/base/bus.c (start ? &start->p->knode_bus : NULL)); start 326 drivers/base/bus.c struct device *start, const void *data, start 336 drivers/base/bus.c (start ? &start->p->knode_bus : NULL)); start 417 drivers/base/bus.c int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, start 428 drivers/base/bus.c start ? 
&start->p->knode_bus : NULL); start 987 drivers/base/bus.c struct device *start, const struct device_type *type) start 991 drivers/base/bus.c if (start) start 992 drivers/base/bus.c start_knode = &start->p->knode_bus; start 281 drivers/base/class.c struct device *start, const struct device_type *type) start 285 drivers/base/class.c if (start) start 286 drivers/base/class.c start_knode = &start->p->knode_class; start 351 drivers/base/class.c int class_for_each_device(struct class *class, struct device *start, start 366 drivers/base/class.c class_dev_iter_init(&iter, class, start, NULL); start 398 drivers/base/class.c struct device *class_find_device(struct class *class, struct device *start, start 413 drivers/base/class.c class_dev_iter_init(&iter, class, start, NULL); start 810 drivers/base/core.c start: start 824 drivers/base/core.c goto start; start 837 drivers/base/core.c goto start; start 41 drivers/base/driver.c int driver_for_each_device(struct device_driver *drv, struct device *start, start 52 drivers/base/driver.c start ? &start->p->knode_driver : NULL); start 76 drivers/base/driver.c struct device *start, const void *data, start 86 drivers/base/driver.c (start ? &start->p->knode_driver : NULL)); start 692 drivers/base/memory.c int create_memory_block_devices(unsigned long start, unsigned long size) start 694 drivers/base/memory.c const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); start 695 drivers/base/memory.c unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); start 700 drivers/base/memory.c if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || start 729 drivers/base/memory.c void remove_memory_block_devices(unsigned long start, unsigned long size) start 731 drivers/base/memory.c const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); start 732 drivers/base/memory.c const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); start 736 drivers/base/memory.c if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || start 834 drivers/base/memory.c int walk_memory_blocks(unsigned long start, unsigned long size, start 837 drivers/base/memory.c const unsigned long start_block_id = phys_to_block_id(start); start 838 drivers/base/memory.c const unsigned long end_block_id = phys_to_block_id(start + size - 1); start 121 drivers/base/platform.c irqd = irq_get_irq_data(r->start); start 128 drivers/base/platform.c return r->start; start 260 drivers/base/platform.c return r->start; start 1281 drivers/base/platform.c struct device *platform_find_device_by_driver(struct device *start, start 1284 drivers/base/platform.c return bus_find_device(&platform_bus_type, start, drv, start 193 drivers/base/power/domain.c return GENPD_DEV_CALLBACK(genpd, int, start, dev); start 1129 drivers/base/power/domain.c if (genpd->dev_ops.stop && genpd->dev_ops.start && start 1188 drivers/base/power/domain.c if (genpd->dev_ops.stop && genpd->dev_ops.start && start 1222 drivers/base/power/domain.c if (genpd->dev_ops.stop && genpd->dev_ops.start && start 1247 drivers/base/power/domain.c if (genpd->dev_ops.stop && genpd->dev_ops.start && start 1306 drivers/base/power/domain.c if (genpd->dev_ops.stop && genpd->dev_ops.start && start 1811 drivers/base/power/domain.c genpd->dev_ops.start = pm_clk_resume; start 1101 drivers/base/power/wakeup.c .start = wakeup_sources_stats_seq_start, start 886 drivers/base/property.c return res.start; start 245 drivers/base/regmap/internal.h unsigned int block_base, unsigned int start, start 472 
drivers/base/regmap/regcache-rbtree.c unsigned int start, end; start 487 drivers/base/regmap/regcache-rbtree.c start = (min - base_reg) / map->reg_stride; start 489 drivers/base/regmap/regcache-rbtree.c start = 0; start 498 drivers/base/regmap/regcache-rbtree.c rbnode->base_reg, start, end); start 513 drivers/base/regmap/regcache-rbtree.c unsigned int start, end; start 527 drivers/base/regmap/regcache-rbtree.c start = (min - base_reg) / map->reg_stride; start 529 drivers/base/regmap/regcache-rbtree.c start = 0; start 536 drivers/base/regmap/regcache-rbtree.c bitmap_clear(rbnode->cache_present, start, end - start); start 671 drivers/base/regmap/regcache.c unsigned int start, unsigned int end) start 676 drivers/base/regmap/regcache.c for (i = start; i < end; i++) { start 734 drivers/base/regmap/regcache.c unsigned int block_base, unsigned int start, start 743 drivers/base/regmap/regcache.c for (i = start; i < end; i++) { start 776 drivers/base/regmap/regcache.c unsigned int block_base, unsigned int start, start 781 drivers/base/regmap/regcache.c block_base, start, end); start 784 drivers/base/regmap/regcache.c block_base, start, end); start 305 drivers/base/regmap/regmap-debugfs.c char *start = buf; start 315 drivers/base/regmap/regmap-debugfs.c while (*start == ' ') start 316 drivers/base/regmap/regmap-debugfs.c start++; start 317 drivers/base/regmap/regmap-debugfs.c reg = simple_strtoul(start, &start, 16); start 318 drivers/base/regmap/regmap-debugfs.c while (*start == ' ') start 319 drivers/base/regmap/regmap-debugfs.c start++; start 320 drivers/base/regmap/regmap-debugfs.c if (kstrtoul(start, 16, &value)) start 45 drivers/bcma/driver_chipcommon_pflash.c bcma_pflash_resource.start = BCMA_SOC_FLASH2; start 15 drivers/bcma/driver_chipcommon_sflash.c .start = BCMA_SOC_FLASH2, start 160 drivers/bcma/driver_chipcommon_sflash.c bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start + start 423 drivers/bcma/driver_pci_host.c pc_host->mem_resource.start = BCMA_SOC_PCI_DMA; start 428 drivers/bcma/driver_pci_host.c pc_host->io_resource.start = 0x100; start 447 drivers/bcma/driver_pci_host.c pc_host->mem_resource.start = BCMA_SOC_PCI_MEM; start 457 drivers/bcma/driver_pci_host.c pc_host->mem_resource.start = BCMA_SOC_PCI_MEM; start 460 drivers/bcma/driver_pci_host.c pc_host->io_resource.start = 0x100; start 466 drivers/bcma/driver_pci_host.c pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM; start 469 drivers/bcma/driver_pci_host.c pc_host->io_resource.start = 0x480; start 518 drivers/bcma/driver_pci_host.c io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start, start 973 drivers/block/aoe/aoecmd.c d->geo.start = 0; start 1548 drivers/block/drbd/drbd_int.h sector_t start, unsigned int nr_sectors, int flags); start 1511 drivers/block/drbd/drbd_receiver.c int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags) start 1535 drivers/block/drbd/drbd_receiver.c tmp = start; start 1540 drivers/block/drbd/drbd_receiver.c tmp = start + granularity - alignment; start 1541 drivers/block/drbd/drbd_receiver.c tmp = start + granularity - sector_div(tmp, granularity); start 1543 drivers/block/drbd/drbd_receiver.c nr = tmp - start; start 1546 drivers/block/drbd/drbd_receiver.c err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0); start 1548 drivers/block/drbd/drbd_receiver.c start = tmp; start 1551 drivers/block/drbd/drbd_receiver.c err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0); start 1553 
drivers/block/drbd/drbd_receiver.c start += max_discard_sectors; start 1563 drivers/block/drbd/drbd_receiver.c err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0); start 1565 drivers/block/drbd/drbd_receiver.c start += nr; start 1570 drivers/block/drbd/drbd_receiver.c err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, start 1215 drivers/block/mtip32xx/mtip32xx.c unsigned long start; start 1226 drivers/block/mtip32xx/mtip32xx.c start = jiffies; start 1235 drivers/block/mtip32xx/mtip32xx.c jiffies_to_msecs(jiffies - start)); start 2065 drivers/block/mtip32xx/mtip32xx.c u64 start = blk_rq_pos(rq); start 2091 drivers/block/mtip32xx/mtip32xx.c fis->lba_low = start & 0xFF; start 2092 drivers/block/mtip32xx/mtip32xx.c fis->lba_mid = (start >> 8) & 0xFF; start 2093 drivers/block/mtip32xx/mtip32xx.c fis->lba_hi = (start >> 16) & 0xFF; start 2094 drivers/block/mtip32xx/mtip32xx.c fis->lba_low_ex = (start >> 24) & 0xFF; start 2095 drivers/block/mtip32xx/mtip32xx.c fis->lba_mid_ex = (start >> 32) & 0xFF; start 2096 drivers/block/mtip32xx/mtip32xx.c fis->lba_hi_ex = (start >> 40) & 0xFF; start 2540 drivers/block/mtip32xx/mtip32xx.c unsigned long timeout, cnt = 0, start; start 2545 drivers/block/mtip32xx/mtip32xx.c start = jiffies; start 2565 drivers/block/mtip32xx/mtip32xx.c jiffies_to_msecs(jiffies - start) / 1000); start 2571 drivers/block/mtip32xx/mtip32xx.c jiffies_to_msecs(jiffies - start) / 1000); start 2580 drivers/block/mtip32xx/mtip32xx.c jiffies_to_msecs(jiffies - start) / 1000); start 356 drivers/block/null_blk_main.c u64 start, end; start 372 drivers/block/null_blk_main.c ret = kstrtoull(buf + 1, 0, &start); start 379 drivers/block/null_blk_main.c if (start > end) start 384 drivers/block/null_blk_main.c ret = badblocks_set(&t_dev->badblocks, start, start 385 drivers/block/null_blk_main.c end - start + 1, 1); start 387 drivers/block/null_blk_main.c ret = badblocks_clear(&t_dev->badblocks, start, start 388 drivers/block/null_blk_main.c end - start + 1); start 45 drivers/block/null_blk_zoned.c zone->start = sector; start 47 drivers/block/null_blk_zoned.c zone->wp = zone->start + zone->len; start 57 drivers/block/null_blk_zoned.c zone->start = zone->wp = sector; start 113 drivers/block/null_blk_zoned.c if (zone->wp == zone->start + zone->len) start 138 drivers/block/null_blk_zoned.c zone[i].wp = zone[i].start; start 146 drivers/block/null_blk_zoned.c zone->wp = zone->start; start 212 drivers/block/paride/pg.c int start; /* jiffies at command start */ start 608 drivers/block/paride/pg.c dev->start = jiffies; start 654 drivers/block/paride/pg.c hdr.duration = (jiffies - dev->start + HZ / 2) / HZ; start 2158 drivers/block/rbd.c void *p, *start; start 2169 drivers/block/rbd.c p = start = page_address(pages[0]); start 2180 drivers/block/rbd.c osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0, start 499 drivers/block/rsxx/core.c signed long start; start 504 drivers/block/rsxx/core.c start = jiffies; start 510 drivers/block/rsxx/core.c (jiffies - start < timeout)); start 523 drivers/block/rsxx/core.c start = jiffies; start 529 drivers/block/rsxx/core.c (jiffies - start < timeout)); start 902 drivers/block/swim.c if (!request_mem_region(res->start, resource_size(res), CARDNAME)) { start 907 drivers/block/swim.c swim_base = (struct swim __iomem *)res->start; start 942 drivers/block/swim.c release_mem_region(res->start, resource_size(res)); start 971 drivers/block/swim.c release_mem_region(res->start, resource_size(res)); start 309 drivers/block/sx8.c __le32 start; start 
621 drivers/block/sx8.c ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); start 770 drivers/block/sx8.c carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i])); start 1201 drivers/block/xsysace.c physaddr = dev->resource[i].start; start 1203 drivers/block/xsysace.c irq = dev->resource[i].start; start 73 drivers/block/z2ram.c unsigned long start = blk_rq_pos(req) << 9; start 78 drivers/block/z2ram.c if (start + len > z2ram_size) { start 89 drivers/block/z2ram.c unsigned long addr = start & Z2RAM_CHUNKMASK; start 95 drivers/block/z2ram.c addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; start 100 drivers/block/z2ram.c start += size; start 160 drivers/block/zram/zram_drv.c sector_t start, unsigned int size) start 165 drivers/block/zram/zram_drv.c if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) start 170 drivers/block/zram/zram_drv.c end = start + (size >> SECTOR_SHIFT); start 173 drivers/block/zram/zram_drv.c if (unlikely(start >= bound || end > bound || start > end)) start 162 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 177 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 233 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 378 drivers/bluetooth/bluecard_cs.c iobase = info->p_dev->resource[0]->start; start 508 drivers/bluetooth/bluecard_cs.c iobase = info->p_dev->resource[0]->start; start 623 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 638 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 684 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 797 drivers/bluetooth/bluecard_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 856 drivers/bluetooth/bluecard_cs.c link->resource[0]->start = n ^ 0x300; start 189 drivers/bluetooth/bt3c_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 226 drivers/bluetooth/bt3c_cs.c iobase = info->p_dev->resource[0]->start; start 345 drivers/bluetooth/bt3c_cs.c iobase = info->p_dev->resource[0]->start; start 455 drivers/bluetooth/bt3c_cs.c iobase = info->p_dev->resource[0]->start; start 651 drivers/bluetooth/bt3c_cs.c if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0)) start 675 drivers/bluetooth/bt3c_cs.c p_dev->resource[0]->start = base[j]; start 147 drivers/bluetooth/dtl1_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 213 drivers/bluetooth/dtl1_cs.c iobase = info->p_dev->resource[0]->start; start 302 drivers/bluetooth/dtl1_cs.c iobase = info->p_dev->resource[0]->start; start 438 drivers/bluetooth/dtl1_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 481 drivers/bluetooth/dtl1_cs.c info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR) start 507 drivers/bluetooth/dtl1_cs.c unsigned int iobase = info->p_dev->resource[0]->start; start 1067 drivers/bluetooth/hci_bcm.c dev->irq = entry->res->start; start 482 drivers/bus/arm-cci.c ports[i].base = ioremap(res.start, resource_size(&res)); start 483 drivers/bus/arm-cci.c ports[i].phys = res.start; start 546 drivers/bus/arm-cci.c cci_ctrl_base = ioremap(res.start, resource_size(&res)); start 547 drivers/bus/arm-cci.c cci_ctrl_phys = res.start; start 617 drivers/bus/fsl-mc/dprc-driver.c mc_dev->regions[0].start, start 496 drivers/bus/fsl-mc/fsl-mc-bus.c regions[i].start = region_desc.base_address + start 501 
drivers/bus/fsl-mc/fsl-mc-bus.c &regions[i].start); start 511 drivers/bus/fsl-mc/fsl-mc-bus.c regions[i].end = regions[i].start + region_desc.size - 1; start 842 drivers/bus/fsl-mc/fsl-mc-bus.c mc_portal_phys_addr = res.start; start 202 drivers/bus/fsl-mc/mc-io.c mc_portal_phys_addr = dpmcp_dev->regions[0].start; start 351 drivers/bus/hisi_lpc.c sys_port = logic_pio_trans_hwaddr(&host->fwnode, res->start, len); start 355 drivers/bus/hisi_lpc.c res->start = sys_port; start 514 drivers/bus/hisi_lpc.c .iobase = res->start, start 105 drivers/bus/mips_cdmm.c (unsigned long long)dev->res.start, start 533 drivers/bus/mips_cdmm.c dev->res.start = bus->phys + drb * CDMM_DRB_SIZE; start 611 drivers/bus/mvebu-mbus.c mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) start 632 drivers/bus/mvebu-mbus.c *start = s; start 1294 drivers/bus/mvebu-mbus.c mem->start = reg[0]; start 1295 drivers/bus/mvebu-mbus.c mem->end = mem->start + reg[1] - 1; start 1301 drivers/bus/mvebu-mbus.c io->start = reg[0]; start 1302 drivers/bus/mvebu-mbus.c io->end = io->start + reg[1] - 1; start 1365 drivers/bus/mvebu-mbus.c mbuswins_res.start, start 1367 drivers/bus/mvebu-mbus.c sdramwins_res.start, start 1369 drivers/bus/mvebu-mbus.c mbusbridge_res.start, start 232 drivers/bus/omap_l3_smx.c l3->rt = ioremap(res->start, resource_size(res)); start 684 drivers/bus/ti-sysc.c ddata->offsets[reg] = res->start - ddata->module_pa; start 388 drivers/char/agp/efficeon-agp.c if (!r->start && r->end) { start 404 drivers/char/agp/i460-agp.c struct lp_desc *start, *end, *lp; start 414 drivers/char/agp/i460-agp.c start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; start 425 drivers/char/agp/i460-agp.c for (lp = start; lp <= end; ++lp) { start 429 drivers/char/agp/i460-agp.c for (idx = ((lp == start) ? start_offset : 0); start 438 drivers/char/agp/i460-agp.c for (lp = start, i = 0; lp <= end; ++lp) { start 449 drivers/char/agp/i460-agp.c for (idx = ((lp == start) ? start_offset : 0); start 465 drivers/char/agp/i460-agp.c struct lp_desc *start, *end, *lp; start 472 drivers/char/agp/i460-agp.c start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; start 477 drivers/char/agp/i460-agp.c for (i = 0, lp = start; lp <= end; ++lp) { start 478 drivers/char/agp/i460-agp.c for (idx = ((lp == start) ?
start_offset : 0); start 777 drivers/char/agp/intel-agp.c if (!r->start && r->end) { start 903 drivers/char/agp/intel-gtt.c int start = intel_private.stolen_size / PAGE_SIZE; start 905 drivers/char/agp/intel-gtt.c intel_gtt_clear_range(start, end - start); start 1024 drivers/char/agp/intel-gtt.c pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); start 1029 drivers/char/agp/intel-gtt.c intel_private.ifp_resource.start = temp; start 1052 drivers/char/agp/intel-gtt.c upper_32_bits(intel_private.ifp_resource.start)); start 1053 drivers/char/agp/intel-gtt.c pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); start 1061 drivers/char/agp/intel-gtt.c intel_private.ifp_resource.start = l64; start 1073 drivers/char/agp/intel-gtt.c if (intel_private.ifp_resource.start) start 1090 drivers/char/agp/intel-gtt.c if (intel_private.ifp_resource.start) start 1091 drivers/char/agp/intel-gtt.c intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); start 1103 drivers/char/agp/intel-gtt.c intel_private.ifp_resource.start = 0; start 40 drivers/char/agp/isoch.c struct list_head *pos, *tmp, *head = &list->list, *start = head->next; start 45 drivers/char/agp/isoch.c for (pos=start; pos!=head; ) { start 206 drivers/char/bsr.c cur->bsr_addr = res.start; start 781 drivers/char/hpet.c unsigned long t, m, count, i, flags, start; start 803 drivers/char/hpet.c start = read_counter(&hpet->hpet_mc); start 808 drivers/char/hpet.c } while (i++, (m - start) < count); start 812 drivers/char/hpet.c return (m - start) / i; start 55 drivers/char/hw_random/nomadik-rng.c base = devm_ioremap(&dev->dev, dev->res.start, start 85 drivers/char/hw_random/octeon-rng.c res_ports->start, start 91 drivers/char/hw_random/octeon-rng.c res_result->start, start 120 drivers/char/hw_random/timeriomem-rng.c if (res->start % 4 != 0 || resource_size(res) < 4) { start 69 drivers/char/ipmi/ipmi_plat_data.c r[0].start = p->addr; start 70 drivers/char/ipmi/ipmi_plat_data.c r[0].end = r[0].start + p->regsize - 1; start 75 drivers/char/ipmi/ipmi_plat_data.c r[1].start = r[0].start + p->regspacing; start 76 drivers/char/ipmi/ipmi_plat_data.c r[1].end = r[1].start + p->regsize - 1; start 83 drivers/char/ipmi/ipmi_plat_data.c r[2].start = r[1].start + p->regspacing; start 84 drivers/char/ipmi/ipmi_plat_data.c r[2].end = r[2].start + p->regsize - 1; start 91 drivers/char/ipmi/ipmi_plat_data.c r[num_r].start = p->irq; start 19 drivers/char/ipmi/ipmi_si_parisc.c io.addr_data = dev->hpa.start; start 121 drivers/char/ipmi/ipmi_si_platform.c io->addr_data = res->start; start 129 drivers/char/ipmi/ipmi_si_platform.c if (res_second->start > io->addr_data) start 130 drivers/char/ipmi/ipmi_si_platform.c io->regspacing = res_second->start - io->addr_data; start 283 drivers/char/ipmi/ipmi_si_platform.c io.addr_data = resource.start; start 41 drivers/char/mem.c static inline unsigned long size_inside_page(unsigned long start, start 46 drivers/char/mem.c sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); start 93 drivers/char/misc.c .start = misc_seq_start, start 423 drivers/char/pcmcia/cm4000_cs.c unsigned int iobase = dev->p_dev->resource[0]->start; start 456 drivers/char/pcmcia/cm4000_cs.c unsigned int iobase = dev->p_dev->resource[0]->start; start 665 drivers/char/pcmcia/cm4000_cs.c unsigned int iobase = dev->p_dev->resource[0]->start; start 925 drivers/char/pcmcia/cm4000_cs.c unsigned int iobase = 
dev->p_dev->resource[0]->start; start 1049 drivers/char/pcmcia/cm4000_cs.c unsigned int iobase = dev->p_dev->resource[0]->start; start 1402 drivers/char/pcmcia/cm4000_cs.c unsigned int iobase = dev->p_dev->resource[0]->start; start 110 drivers/char/pcmcia/cm4040_cs.c unsigned int obs = xinb(dev->p_dev->resource[0]->start start 141 drivers/char/pcmcia/cm4040_cs.c int iobase = dev->p_dev->resource[0]->start; start 171 drivers/char/pcmcia/cm4040_cs.c int iobase = dev->p_dev->resource[0]->start; start 189 drivers/char/pcmcia/cm4040_cs.c int iobase = dev->p_dev->resource[0]->start; start 219 drivers/char/pcmcia/cm4040_cs.c int iobase = dev->p_dev->resource[0]->start; start 321 drivers/char/pcmcia/cm4040_cs.c int iobase = dev->p_dev->resource[0]->start; start 259 drivers/char/pcmcia/scr24x_cs.c link->resource[PCMCIA_IOPORT_0]->start, start 600 drivers/char/pcmcia/synclink_cs.c info->io_base = link->resource[0]->start; start 1577 drivers/char/pcmcia/synclink_cs.c goto start; start 1598 drivers/char/pcmcia/synclink_cs.c start: start 2799 drivers/char/pcmcia/synclink_cs.c .start = tx_release, start 277 drivers/char/ps3flash.c static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 337 drivers/char/ps3flash.c tmp = dev->regions[dev->region_idx].start*dev->blk_size; start 1278 drivers/char/random.c static void add_interrupt_bench(cycles_t start) start 1280 drivers/char/random.c long delta = random_get_entropy() - start; start 2455 drivers/char/random.c randomize_page(unsigned long start, unsigned long range) start 2457 drivers/char/random.c if (!PAGE_ALIGNED(start)) { start 2458 drivers/char/random.c range -= PAGE_ALIGN(start) - start; start 2459 drivers/char/random.c start = PAGE_ALIGN(start); start 2462 drivers/char/random.c if (start > ULONG_MAX - range) start 2463 drivers/char/random.c range = ULONG_MAX - start; start 2468 drivers/char/random.c return start; start 2470 drivers/char/random.c return start + (get_random_long() % range << PAGE_SHIFT); start 869 drivers/char/rtc.c rtc_port = op->resource[0].start; start 50 drivers/char/tpm/eventlog/acpi.c u64 len, start; start 74 drivers/char/tpm/eventlog/acpi.c start = buff->server.log_start_addr; start 79 drivers/char/tpm/eventlog/acpi.c start = buff->client.log_start_addr; start 94 drivers/char/tpm/eventlog/acpi.c virt = acpi_os_map_iomem(start, len); start 285 drivers/char/tpm/eventlog/tpm1.c .start = tpm1_bios_measurements_start, start 292 drivers/char/tpm/eventlog/tpm1.c .start = tpm1_bios_measurements_start, start 157 drivers/char/tpm/eventlog/tpm2.c .start = tpm2_bios_measurements_start, start 114 drivers/char/tpm/tpm_crb.c ktime_t start; start 117 drivers/char/tpm/tpm_crb.c start = ktime_get(); start 118 drivers/char/tpm/tpm_crb.c stop = ktime_add(start, ms_to_ktime(timeout)); start 451 drivers/char/tpm/tpm_crb.c struct resource *io_res, u64 start, u32 size) start 454 drivers/char/tpm/tpm_crb.c .start = start, start 455 drivers/char/tpm/tpm_crb.c .end = start + size - 1, start 460 drivers/char/tpm/tpm_crb.c if (start != new_res.start) start 466 drivers/char/tpm/tpm_crb.c return priv->iobase + (new_res.start - io_res->start); start 475 drivers/char/tpm/tpm_crb.c u64 start, u64 size) start 477 drivers/char/tpm/tpm_crb.c if (io_res->start > start || io_res->end < start) start 480 drivers/char/tpm/tpm_crb.c if (start + size - 1 <= io_res->end) start 485 drivers/char/tpm/tpm_crb.c io_res, start, size); start 487 drivers/char/tpm/tpm_crb.c return io_res->end - start + 1; start 526 drivers/char/tpm/tpm_crb.c if 
(buf->control_address == io_res.start + start 282 drivers/char/tpm/tpm_ppi.c static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, start 304 drivers/char/tpm/tpm_ppi.c for (i = start; i <= end; i++) { start 335 drivers/char/tpm/tpm_tis.c .start = 0xFED40000, start 650 drivers/char/xilinx_hwicap/xilinx_hwicap.c drvdata->mem_start = regs_res->start; start 657 drivers/char/xilinx_hwicap/xilinx_hwicap.c (unsigned long long) regs_res->start); start 697 drivers/char/xilinx_hwicap/xilinx_hwicap.c release_mem_region(regs_res->start, drvdata->mem_size); start 830 drivers/clk/bcm/clk-kona-setup.c ccu->base = ioremap(res.start, ccu->range); start 557 drivers/clk/clk-npcm7xx.c clk_base = ioremap(res.start, resource_size(&res)); start 1019 drivers/clk/clk-qoriq.c idx = (res.start & 0xf0) >> 5; start 1305 drivers/clk/clk-qoriq.c if ((res.start & 0xfff) == 0xc00) { start 1312 drivers/clk/clk-qoriq.c idx = (res.start & 0xf0) >> 5; start 145 drivers/clk/hisilicon/clk-hi3660-stub.c freq_reg = devm_ioremap(dev, res->start, resource_size(res)); start 41 drivers/clk/hisilicon/clk.c res->start, resource_size(res)); start 179 drivers/clk/imx/clk-imx8qxp-lpcg.c base = devm_ioremap(dev, res->start, resource_size(res)); start 210 drivers/clk/qcom/clk-spmi-pmic-div.c u32 start; start 212 drivers/clk/qcom/clk-spmi-pmic-div.c ret = of_property_read_u32(of_node, "reg", &start); start 264 drivers/clk/qcom/clk-spmi-pmic-div.c clkdiv[i].base = start + i * 0x100; start 96 drivers/clk/qcom/gdsc.c ktime_t start; start 98 drivers/clk/qcom/gdsc.c start = ktime_get(); start 102 drivers/clk/qcom/gdsc.c } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US); start 440 drivers/clk/samsung/clk-pll.c ktime_t start; start 492 drivers/clk/samsung/clk-pll.c start = ktime_get(); start 494 drivers/clk/samsung/clk-pll.c ktime_t delta = ktime_sub(ktime_get(), start); start 591 drivers/clk/samsung/clk-pll.c ktime_t start; start 651 drivers/clk/samsung/clk-pll.c start = ktime_get(); start 653 drivers/clk/samsung/clk-pll.c ktime_t delta = ktime_sub(ktime_get(), start); start 85 drivers/clk/sunxi/clk-simple-gates.c release_mem_region(res.start, resource_size(&res)); start 219 drivers/clk/sunxi/clk-sun4i-display.c release_mem_region(res.start, resource_size(&res)); start 86 drivers/clk/sunxi/clk-sun4i-pll3.c release_mem_region(res.start, resource_size(&res)); start 284 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c release_mem_region(res.start, resource_size(&res)); start 82 drivers/clk/sunxi/clk-sun8i-apb0.c release_mem_region(res.start, resource_size(&res)); start 103 drivers/clk/sunxi/clk-sun8i-bus-gates.c release_mem_region(res.start, resource_size(&res)); start 105 drivers/clk/sunxi/clk-sun8i-mbus.c release_mem_region(res.start, resource_size(&res)); start 237 drivers/clk/sunxi/clk-sun9i-cpus.c release_mem_region(res.start, resource_size(&res)); start 1938 drivers/clk/tegra/clk-dfll.c td->base = devm_ioremap(td->dev, mem->start, resource_size(mem)); start 1950 drivers/clk/tegra/clk-dfll.c td->i2c_base = devm_ioremap(td->dev, mem->start, resource_size(mem)); start 1962 drivers/clk/tegra/clk-dfll.c td->i2c_controller_base = devm_ioremap(td->dev, mem->start, start 1976 drivers/clk/tegra/clk-dfll.c td->lut_base = devm_ioremap(td->dev, mem->start, resource_size(mem)); start 903 drivers/clk/ti/adpll.c d->pa = res->start; start 62 drivers/clk/ti/clkctrl.c ktime_t start; start 112 drivers/clk/ti/clkctrl.c if (!ktime_to_ns(time->start)) { start 113 drivers/clk/ti/clkctrl.c time->start = ktime_get(); start 117 
drivers/clk/ti/clkctrl.c if (ktime_us_delta(ktime_get(), time->start) < timeout) { start 75 drivers/clk/ux500/u8500_of_clk.c bases[i] = r.start; start 601 drivers/clk/zynq/clkc.c zynq_clkc_base = (__force void __iomem *)slcr->data + res.start; start 1425 drivers/clocksource/arm_arch_timer.c timer_mem->cntctlbase = res.start; start 1455 drivers/clocksource/arm_arch_timer.c frame->cntbase = res.start; start 296 drivers/clocksource/sh_cmt.c static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) start 305 drivers/clocksource/sh_cmt.c if (start) start 904 drivers/clocksource/sh_cmt.c cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); start 183 drivers/clocksource/sh_mtu2.c static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) start 191 drivers/clocksource/sh_mtu2.c if (start) start 376 drivers/clocksource/sh_mtu2.c mtu->mapbase = ioremap_nocache(res->start, resource_size(res)); start 123 drivers/clocksource/sh_tmu.c static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) start 131 drivers/clocksource/sh_tmu.c if (start) start 485 drivers/clocksource/sh_tmu.c tmu->mapbase = ioremap_nocache(res->start, resource_size(res)); start 215 drivers/clocksource/timer-atcpit100.c timer_info.mapping_base = (unsigned long)timer_res.start; start 257 drivers/clocksource/timer-davinci.c if (!request_mem_region(timer_cfg->reg.start, start 264 drivers/clocksource/timer-davinci.c base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg)); start 297 drivers/clocksource/timer-davinci.c rv = request_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start, start 229 drivers/clocksource/timer-qcom.c cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); start 839 drivers/clocksource/timer-ti-dm.c timer->irq = irq->start; start 911 drivers/clocksource/timer-ti-dm.c .start = omap_dm_timer_start, start 148 drivers/clocksource/timer-zevio.c (unsigned long long)res.start, node); start 152 drivers/clocksource/timer-zevio.c (unsigned long long)res.start, node); start 303 drivers/counter/ftm-quaddec.c ftm->ftm_base = devm_ioremap(&pdev->dev, io->start, resource_size(io)); start 2246 drivers/cpufreq/cpufreq.c if (policy->governor->start) { start 2247 drivers/cpufreq/cpufreq.c ret = policy->governor->start(policy); start 322 drivers/cpufreq/cpufreq_conservative.c .start = cs_start, start 536 drivers/cpufreq/cpufreq_governor.c gov->start(policy); start 141 drivers/cpufreq/cpufreq_governor.h void (*start)(struct cpufreq_policy *policy); start 163 drivers/cpufreq/cpufreq_governor.h .start = cpufreq_dbs_governor_start, \ start 408 drivers/cpufreq/cpufreq_ondemand.c .start = od_start, start 121 drivers/cpufreq/cpufreq_userspace.c .start = cpufreq_userspace_policy_start, start 157 drivers/cpufreq/pasemi-cpufreq.c sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000); start 175 drivers/cpufreq/pasemi-cpufreq.c sdcpwr_mapbase = ioremap(res.start, 0x1000); start 208 drivers/cpufreq/qcom-cpufreq-hw.c base = devm_ioremap(dev, res->start, resource_size(res)); start 105 drivers/crypto/atmel-aes.c atmel_aes_fn_t start; start 969 drivers/crypto/atmel-aes.c err = ctx->start(dd); start 1017 drivers/crypto/atmel-aes.c u16 blocks, start, end; start 1031 drivers/crypto/atmel-aes.c start = ctr & 0xffff; start 1032 drivers/crypto/atmel-aes.c end = start + blocks - 1; start 1034 drivers/crypto/atmel-aes.c if (blocks >> 16 || end < start) { start 1036 drivers/crypto/atmel-aes.c datalen = AES_BLOCK_SIZE * (0x10000 - start); start 1247 drivers/crypto/atmel-aes.c 
ctx->base.start = atmel_aes_start; start 1257 drivers/crypto/atmel-aes.c ctx->base.start = atmel_aes_ctr_start; start 1817 drivers/crypto/atmel-aes.c ctx->base.start = atmel_aes_gcm_start; start 1947 drivers/crypto/atmel-aes.c ctx->base.start = atmel_aes_xts_start; start 2168 drivers/crypto/atmel-aes.c ctx->base.start = atmel_aes_authenc_start; start 2665 drivers/crypto/atmel-aes.c aes_dd->phys_base = aes_res->start; start 113 drivers/crypto/atmel-sha.c atmel_sha_fn_t start; start 1090 drivers/crypto/atmel-sha.c err = ctx->start(dd); start 1246 drivers/crypto/atmel-sha.c ctx->start = atmel_sha_start; start 2069 drivers/crypto/atmel-sha.c hmac->base.start = atmel_sha_hmac_start; start 2317 drivers/crypto/atmel-sha.c tctx->start = atmel_sha_authenc_start; start 2772 drivers/crypto/atmel-sha.c sha_dd->phys_base = sha_res->start; start 1265 drivers/crypto/atmel-tdes.c tdes_dd->phys_base = tdes_res->start; start 526 drivers/crypto/caam/jr.c ctrl = devm_ioremap(jrdev, r->start, resource_size(r)); start 21 drivers/crypto/ccp/ccp-dev-v3.c int start; start 27 drivers/crypto/ccp/ccp-dev-v3.c start = (u32)bitmap_find_next_zero_area(ccp->sb, start 31 drivers/crypto/ccp/ccp-dev-v3.c if (start <= ccp->sb_count) { start 32 drivers/crypto/ccp/ccp-dev-v3.c bitmap_set(ccp->sb, start, count); start 47 drivers/crypto/ccp/ccp-dev-v3.c return KSB_START + start; start 50 drivers/crypto/ccp/ccp-dev-v3.c static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start, start 55 drivers/crypto/ccp/ccp-dev-v3.c if (!start) start 60 drivers/crypto/ccp/ccp-dev-v3.c bitmap_clear(ccp->sb, start - KSB_START, count); start 28 drivers/crypto/ccp/ccp-dev-v5.c int start; start 32 drivers/crypto/ccp/ccp-dev-v5.c start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, start 35 drivers/crypto/ccp/ccp-dev-v5.c if (start < LSB_SIZE) { start 36 drivers/crypto/ccp/ccp-dev-v5.c bitmap_set(cmd_q->lsbmap, start, count); start 37 drivers/crypto/ccp/ccp-dev-v5.c return start + cmd_q->lsb * LSB_SIZE; start 46 drivers/crypto/ccp/ccp-dev-v5.c start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, start 50 drivers/crypto/ccp/ccp-dev-v5.c if (start <= MAX_LSB_CNT * LSB_SIZE) { start 51 drivers/crypto/ccp/ccp-dev-v5.c bitmap_set(ccp->lsbmap, start, count); start 54 drivers/crypto/ccp/ccp-dev-v5.c return start; start 70 drivers/crypto/ccp/ccp-dev-v5.c static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, start 73 drivers/crypto/ccp/ccp-dev-v5.c if (!start) start 76 drivers/crypto/ccp/ccp-dev-v5.c if (cmd_q->lsb == start) { start 78 drivers/crypto/ccp/ccp-dev-v5.c bitmap_clear(cmd_q->lsbmap, start, count); start 84 drivers/crypto/ccp/ccp-dev-v5.c bitmap_clear(ccp->lsbmap, start, count); start 337 drivers/crypto/ccree/cc_driver.c &req_mem_cc_regs->start, new_drvdata->cc_base); start 35 drivers/crypto/ccree/cc_sram_mgr.c dma_addr_t start = 0; start 40 drivers/crypto/ccree/cc_sram_mgr.c start = (dma_addr_t)cc_ioread(drvdata, start 43 drivers/crypto/ccree/cc_sram_mgr.c if ((start & 0x3) != 0) { start 44 drivers/crypto/ccree/cc_sram_mgr.c dev_err(dev, "Invalid SRAM offset %pad\n", &start); start 55 drivers/crypto/ccree/cc_sram_mgr.c ctx->sram_free_offset = start; start 109 drivers/crypto/chelsio/chtls/chtls.h unsigned int start; start 132 drivers/crypto/chelsio/chtls/chtls_hw.c cdev->kmap.start = lldi->vr->key.start; start 328 drivers/crypto/chelsio/chtls/chtls_hw.c kaddr = keyid_to_addr(cdev->kmap.start, keyid); start 291 drivers/crypto/chelsio/chtls/chtls_io.c kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey); start 240 
drivers/crypto/hisilicon/sec/sec_drv.c queue->regs = ioremap(res->start, resource_size(res)); start 1022 drivers/crypto/hisilicon/sec/sec_drv.c info->regs[i] = devm_ioremap(info->dev, res->start, start 181 drivers/crypto/hisilicon/zip/zip_crypto.c static u16 get_extra_field_size(const u8 *start) start 183 drivers/crypto/hisilicon/zip/zip_crypto.c return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; start 186 drivers/crypto/hisilicon/zip/zip_crypto.c static u32 get_name_field_size(const u8 *start) start 188 drivers/crypto/hisilicon/zip/zip_crypto.c return strlen(start) + 1; start 191 drivers/crypto/hisilicon/zip/zip_crypto.c static u32 get_comment_field_size(const u8 *start) start 193 drivers/crypto/hisilicon/zip/zip_crypto.c return strlen(start) + 1; start 977 drivers/crypto/img-hash.c hdev->bus_addr = hash_res->start; start 410 drivers/crypto/marvell/cesa.c engine->sram_dma = dma_map_resource(cesa->dev, res->start, start 111 drivers/crypto/mediatek/mtk-aes.c mtk_aes_fn start; start 544 drivers/crypto/mediatek/mtk-aes.c return ctx->start(cryp, aes); start 576 drivers/crypto/mediatek/mtk-aes.c u32 start, end, ctr, blocks; start 591 drivers/crypto/mediatek/mtk-aes.c start = ctr; start 592 drivers/crypto/mediatek/mtk-aes.c end = start + blocks - 1; start 593 drivers/crypto/mediatek/mtk-aes.c if (end < start) { start 595 drivers/crypto/mediatek/mtk-aes.c datalen = AES_BLOCK_SIZE * -start; start 737 drivers/crypto/mediatek/mtk-aes.c ctx->base.start = mtk_aes_start; start 746 drivers/crypto/mediatek/mtk-aes.c ctx->base.start = mtk_aes_ctr_start; start 1126 drivers/crypto/mediatek/mtk-aes.c ctx->base.start = mtk_aes_gcm_start; start 38 drivers/crypto/nx/nx-842-powernv.c ktime_t start; start 183 drivers/crypto/nx/nx-842-powernv.c ktime_t start = wmem->start, now = ktime_get(); start 184 drivers/crypto/nx/nx-842-powernv.c ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); start 199 drivers/crypto/nx/nx-842-powernv.c (long)ktime_us_delta(now, start)); start 379 drivers/crypto/nx/nx-842-powernv.c (unsigned long)ktime_us_delta(now, start)); start 483 drivers/crypto/nx/nx-842-powernv.c wmem->start = ktime_get(); start 580 drivers/crypto/nx/nx-842-powernv.c wmem->start = ktime_get(); start 293 drivers/crypto/nx/nx-842-pseries.c unsigned long start = get_tb(); start 375 drivers/crypto/nx/nx-842-pseries.c (get_tb() - start) / tb_ticks_per_usec); start 423 drivers/crypto/nx/nx-842-pseries.c unsigned long start = get_tb(); start 506 drivers/crypto/nx/nx-842-pseries.c (get_tb() - start) / tb_ticks_per_usec); start 151 drivers/crypto/nx/nx.c unsigned int start, start 163 drivers/crypto/nx/nx.c if (start < offset + sg_src->length) start 172 drivers/crypto/nx/nx.c scatterwalk_advance(&walk, start - offset); start 1150 drivers/crypto/omap-aes.c dd->phys_base = res.start; start 23 drivers/crypto/omap-aes.h #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) start 24 drivers/crypto/omap-aes.h #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) start 1008 drivers/crypto/omap-des.c dd->phys_base = res->start; start 2115 drivers/crypto/omap-sham.c dd->phys_base = res.start; start 1671 drivers/crypto/picoxcell_crypto.c if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, start 93 drivers/crypto/qat/qat_common/adf_cfg.c .start = qat_dev_cfg_start, start 125 drivers/crypto/qat/qat_common/adf_transport_debug.c .start = adf_ring_start, start 236 drivers/crypto/qat/qat_common/adf_transport_debug.c .start = adf_bank_start, start 228 
drivers/crypto/rockchip/rk3288_crypto.c err = dev->start(dev); start 217 drivers/crypto/rockchip/rk3288_crypto.h int (*start)(struct rk_crypto_info *dev); start 388 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c ctx->dev->start = rk_ablk_start; start 288 drivers/crypto/rockchip/rk3288_crypto_ahash.c tctx->dev->start = rk_ahash_start; start 796 drivers/crypto/sahara.c int start) start 820 drivers/crypto/sahara.c for (i = start; i < dev->nb_in_sg + start; i++) { start 823 drivers/crypto/sahara.c if (i == (dev->nb_in_sg + start - 1)) { start 329 drivers/crypto/stm32/stm32-cryp.c unsigned int start, unsigned int nbytes, int out) start 337 drivers/crypto/stm32/stm32-cryp.c scatterwalk_advance(&walk, start); start 1446 drivers/crypto/stm32/stm32-hash.c hdev->phys_base = res->start; start 3313 drivers/crypto/talitos.c priv->reg = devm_ioremap(dev, res->start, resource_size(res)); start 1359 drivers/crypto/ux500/cryp/cryp_core.c device_data->phybase = res->start; start 1421 drivers/crypto/ux500/cryp/cryp_core.c ret = devm_request_irq(&pdev->dev, res_irq->start, start 1577 drivers/crypto/ux500/cryp/cryp_core.c disable_irq(res_irq->start); start 1631 drivers/crypto/ux500/cryp/cryp_core.c enable_irq(res_irq->start); start 1674 drivers/crypto/ux500/hash/hash_core.c device_data->phybase = res->start; start 244 drivers/dax/bus.c if (!IS_ALIGNED(res->start, align) start 302 drivers/dax/bus.c return dax_region->res.start; start 69 drivers/dax/device.c phys = pgoff * PAGE_SIZE + res->start; start 70 drivers/dax/device.c if (phys >= res->start && phys <= res->end) { start 425 drivers/dax/device.c if (!devm_request_mem_region(dev, res->start, resource_size(res), start 43 drivers/dax/kmem.c kmem_start = ALIGN(res->start, memory_block_size_bytes()); start 47 drivers/dax/kmem.c kmem_size -= kmem_start - res->start; start 73 drivers/dax/kmem.c rc = add_memory(numa_node, new_res->start, resource_size(new_res)); start 90 drivers/dax/kmem.c resource_size_t kmem_start = res->start; start 42 drivers/dax/pmem/core.c if (!devm_request_mem_region(dev, nsio->res.start, offset, start 54 drivers/dax/pmem/core.c res.start += offset; start 70 drivers/dax/super.c struct block_device *bdev, int blocksize, sector_t start, start 88 drivers/dax/super.c err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff); start 95 drivers/dax/super.c last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512; start 319 drivers/dax/super.c int blocksize, sector_t start, sector_t len) start 324 drivers/dax/super.c return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len); start 41 drivers/dio/dio.c { .name = "DIO mem", .start = 0x00600000, .end = 0x007fffff }, start 43 drivers/dio/dio.c { .name = "DIO-II mem", .start = 0x01000000, .end = 0x1fffffff } start 226 drivers/dio/dio.c dev->resource.start = pa; start 60 drivers/dma/acpi-dma.c mem = rentry->res->start; start 62 drivers/dma/acpi-dma.c irq = rentry->res->start; start 768 drivers/dma/altera-msgdma.c region = devm_request_mem_region(device, (*res)->start, start 775 drivers/dma/altera-msgdma.c *ptr = devm_ioremap_nocache(device, region->start, start 2730 drivers/dma/amba-pl08x.c pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); start 2965 drivers/dma/amba-pl08x.c (unsigned long long)adev->res.start, adev->irq[0]); start 1833 drivers/dma/at_hdmac.c if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { start 1838 drivers/dma/at_hdmac.c atdma->regs = ioremap(io->start, size); start 1986 drivers/dma/at_hdmac.c release_mem_region(io->start, 
size); start 2025 drivers/dma/at_hdmac.c release_mem_region(io->start, resource_size(io)); start 128 drivers/dma/bestcomm/bestcomm.c bcom_eng->tdt[tsk->tasknum].start = 0; start 163 drivers/dma/bestcomm/bestcomm.c if (tdt->start) { start 181 drivers/dma/bestcomm/bestcomm.c tdt->start = start_pa; start 416 drivers/dma/bestcomm/bestcomm.c if (!request_mem_region(res_bcom.start, resource_size(&res_bcom), start 424 drivers/dma/bestcomm/bestcomm.c bcom_eng->regs_base = res_bcom.start; start 425 drivers/dma/bestcomm/bestcomm.c bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma)); start 448 drivers/dma/bestcomm/bestcomm.c release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma)); start 2640 drivers/dma/coh901318.c io->start, start 2655 drivers/dma/coh901318.c base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io)); start 331 drivers/dma/dma-axi-dmac.c unsigned int start = active->num_completed - 1; start 342 drivers/dma/dma-axi-dmac.c for (i = start; i < active->num_sgs; i++) { start 326 drivers/dma/dmatest.c static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, start 333 drivers/dma/dmatest.c for (i = 0; i < start; i++) start 335 drivers/dma/dmatest.c for ( ; i < start + len; i++) start 343 drivers/dma/dmatest.c static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, start 350 drivers/dma/dmatest.c for (i = 0; i < start; i++) start 352 drivers/dma/dmatest.c for ( ; i < start + len; i++) start 382 drivers/dma/dmatest.c static unsigned int dmatest_verify(u8 **bufs, unsigned int start, start 395 drivers/dma/dmatest.c for (i = start; i < end; i++) { start 572 drivers/dma/dmatest.c ktime_t ktime, start, diff; start 707 drivers/dma/dmatest.c start = ktime_get(); start 713 drivers/dma/dmatest.c diff = ktime_sub(ktime_get(), start); start 840 drivers/dma/dmatest.c start = ktime_get(); start 863 drivers/dma/dmatest.c diff = ktime_sub(ktime_get(), start); start 135 drivers/dma/dw-edma/dw-edma-pcie.c dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; start 141 drivers/dma/dw-edma/dw-edma-pcie.c dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; start 147 drivers/dma/dw-edma/dw-edma-pcie.c dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; start 46 drivers/dma/dw-edma/dw-edma-v0-debugfs.c void __iomem *start; start 66 drivers/dma/dw-edma/dw-edma-v0-debugfs.c if (lim[0][ch].start >= reg && reg < lim[0][ch].end) { start 67 drivers/dma/dw-edma/dw-edma-v0-debugfs.c ptr += (reg - lim[0][ch].start); start 72 drivers/dma/dw-edma/dw-edma-v0-debugfs.c if (lim[1][ch].start >= reg && reg < lim[1][ch].end) { start 73 drivers/dma/dw-edma/dw-edma-v0-debugfs.c ptr += (reg - lim[1][ch].start); start 192 drivers/dma/dw-edma/dw-edma-v0-debugfs.c lim[0][i].start = &regs->type.unroll.ch[i].wr; start 261 drivers/dma/dw-edma/dw-edma-v0-debugfs.c lim[1][i].start = &regs->type.unroll.ch[i].rd; start 768 drivers/dma/fsl_raid.c re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res)); start 1144 drivers/dma/fsldma.c chan->id = (res.start & 0xfff) < 0x300 ?
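The DMA-controller hits above (at_hdmac.c, bestcomm.c, coh901318.c, fsl_raid.c) all revolve around the same probe-time idiom: take the MMIO window from a struct resource, reserve [start, end] with (devm_)request_mem_region(), then map it with (devm_)ioremap() starting at res->start. A minimal sketch of that idiom follows; foo_probe() and the foo device are hypothetical and the code is not copied from any driver listed.

	/* Hypothetical example of the res->start claim-and-map pattern. */
	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/io.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		/* Reserve the physical window [res->start, res->end]. */
		if (!devm_request_mem_region(&pdev->dev, res->start,
					     resource_size(res), pdev->name))
			return -EBUSY;

		/* Map it; register accesses go through 'regs' from here on. */
		regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!regs)
			return -ENOMEM;

		return 0;
	}

The devm_ variants release the region and unmap automatically when the device detaches, which is why the older hits in this run still pair request_mem_region()/ioremap() with explicit release_mem_region() calls on their error and remove paths.
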
start 1145 drivers/dma/fsldma.c ((res.start - 0x100) & 0xfff) >> 7 : start 1146 drivers/dma/fsldma.c ((res.start - 0x200) & 0xfff) >> 7; start 2137 drivers/dma/imx-sdma.c sdma->spba_start_addr = spba_res.start; start 1278 drivers/dma/iop-adma.c if (!devm_request_mem_region(&pdev->dev, res->start, start 1349 drivers/dma/iop-adma.c iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, start 1701 drivers/dma/ipu/ipu_idmac.c ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); start 1708 drivers/dma/ipu/ipu_idmac.c ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); start 1732 drivers/dma/ipu/ipu_idmac.c (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err); start 104 drivers/dma/mcf-edma.c for (ret = 0, i = res->start; i <= res->end; ++i) start 114 drivers/dma/mcf-edma.c for (ret = 0, i = res->start; i <= res->end; ++i) start 147 drivers/dma/mcf-edma.c for (irq = res->start; irq <= res->end; irq++) start 154 drivers/dma/mcf-edma.c for (irq = res->start; irq <= res->end; irq++) start 841 drivers/dma/mediatek/mtk-cqdma.c cqdma->pc[i]->irq = res->start; start 932 drivers/dma/mediatek/mtk-hsdma.c hsdma->irq = res->start; start 790 drivers/dma/mmp_pdma.c u32 start, end, len; start 793 drivers/dma/mmp_pdma.c start = sw->desc.dtadr; start 795 drivers/dma/mmp_pdma.c start = sw->desc.dsadr; start 798 drivers/dma/mmp_pdma.c end = start + len; start 810 drivers/dma/mmp_pdma.c } else if (curr >= start && curr <= end) { start 181 drivers/dma/mpc512x_dma.c u32 start:1; /* Channel start */ start 292 drivers/dma/mpc512x_dma.c mdesc->tcd->start = 1; start 938 drivers/dma/mpc512x_dma.c regs_start = res.start; start 1310 drivers/dma/mv_xor.c xordev->xor_base = devm_ioremap(&pdev->dev, res->start, start 1319 drivers/dma/mv_xor.c xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start, start 1335 drivers/dma/nbpfaxi.c for (irq = irq_res->start; irq <= irq_res->end; start 240 drivers/dma/of-dma.c int count, i, start; start 264 drivers/dma/of-dma.c start = atomic_inc_return(&last_index); start 267 drivers/dma/of-dma.c (i + start) % count, start 4060 drivers/dma/ppc4xx/adma.c if (!request_mem_region(res.start, resource_size(&res), start 4094 drivers/dma/ppc4xx/adma.c regs = ioremap(res.start, resource_size(&res)); start 4226 drivers/dma/ppc4xx/adma.c release_mem_region(res.start, resource_size(&res)); start 4283 drivers/dma/ppc4xx/adma.c release_mem_region(res.start, resource_size(&res)); start 4473 drivers/dma/ppc4xx/adma.c dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32)); start 4474 drivers/dma/ppc4xx/adma.c dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) | start 1127 drivers/dma/pxa_dma.c u32 curr, start, len, end, residue = 0; start 1164 drivers/dma/pxa_dma.c start = hw_desc->dsadr; start 1166 drivers/dma/pxa_dma.c start = hw_desc->dtadr; start 1168 drivers/dma/pxa_dma.c end = start + len; start 1181 drivers/dma/pxa_dma.c } else if (curr >= start && curr <= end) { start 123 drivers/dma/qcom/hidma_dbg.c seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start); start 127 drivers/dma/qcom/hidma_dbg.c seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start); start 309 drivers/dma/qcom/hidma_mgmt.c &res->start, mgmtdev->dma_channels); start 925 drivers/dma/sa11x0-dma.c d->base = ioremap(res->start, resource_size(res)); start 792 drivers/dma/sh/shdmac.c errirq = errirq_res->start; start 806 drivers/dma/sh/shdmac.c if (chanirq_res->start == chanirq_res->end && start 811 drivers/dma/sh/shdmac.c chan_irq[irq_cnt] = 
chanirq_res->start; start 820 drivers/dma/sh/shdmac.c for (i = chanirq_res->start; i <= chanirq_res->end; i++) { start 885 drivers/dma/sirf-dma.c regs_start = res.start; start 3138 drivers/dma/ste_dma40.c if (request_mem_region(res->start, resource_size(res), start 3142 drivers/dma/ste_dma40.c virtbase = ioremap(res->start, resource_size(res)); start 3195 drivers/dma/ste_dma40.c rev, &res->start, num_phy_chans, num_log_chans); start 3209 drivers/dma/ste_dma40.c base->phy_start = res->start; start 3309 drivers/dma/ste_dma40.c release_mem_region(res->start, resource_size(res)); start 3548 drivers/dma/ste_dma40.c base->phy_lcpa = res->start; start 3550 drivers/dma/ste_dma40.c if (request_mem_region(res->start, resource_size(res), start 3559 drivers/dma/ste_dma40.c if (res->start != val && val != 0) { start 3562 drivers/dma/ste_dma40.c __func__, val, &res->start); start 3564 drivers/dma/ste_dma40.c writel(res->start, base->virtbase + D40_DREG_LCPA); start 3566 drivers/dma/ste_dma40.c base->lcpa_base = ioremap(res->start, resource_size(res)); start 3582 drivers/dma/ste_dma40.c base->lcla_pool.base = ioremap(res->start, start 3589 drivers/dma/ste_dma40.c writel(res->start, base->virtbase + D40_DREG_LCLA); start 1457 drivers/dma/tegra20-apb-dma.c tdc->irq = res->start; start 2072 drivers/dma/ti/edma.c xbar = devm_ioremap(dev, res.start, resource_size(&res)); start 634 drivers/dma/timb_dma.c if (!request_mem_region(iomem->start, resource_size(iomem), start 647 drivers/dma/timb_dma.c td->membase = ioremap(iomem->start, resource_size(iomem)); start 737 drivers/dma/timb_dma.c release_mem_region(iomem->start, resource_size(iomem)); start 754 drivers/dma/timb_dma.c release_mem_region(iomem->start, resource_size(iomem)); start 1185 drivers/dma/txx9dmac.c if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), start 1189 drivers/dma/txx9dmac.c ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io)); start 1628 drivers/dma/xgene-dma.c pdma->csr_dma = devm_ioremap(&pdev->dev, res->start, start 1642 drivers/dma/xgene-dma.c pdma->csr_ring = devm_ioremap(&pdev->dev, res->start, start 1656 drivers/dma/xgene-dma.c pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start, start 1672 drivers/dma/xgene-dma.c pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start, start 792 drivers/edac/altera_edac.c if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r), start 816 drivers/edac/altera_edac.c drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 1059 drivers/edac/altera_edac.c base = res.start; start 2203 drivers/edac/altera_edac.c base = res.start; start 258 drivers/edac/aspeed_edac.c r.start, resource_size(&r), PAGE_SHIFT); start 260 drivers/edac/aspeed_edac.c csrow->first_page = r.start >> PAGE_SHIFT; start 148 drivers/edac/cell_edac.c csrow->first_page = r.start >> PAGE_SHIFT; start 289 drivers/edac/cpc925_edac.c unsigned long start, size; start 302 drivers/edac/cpc925_edac.c start = of_read_number(reg, aw); start 306 drivers/edac/cpc925_edac.c edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size); start 931 drivers/edac/cpc925_edac.c r->start, start 939 drivers/edac/cpc925_edac.c vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 1006 drivers/edac/cpc925_edac.c devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); start 444 drivers/edac/fsl_ddr_edac.c u32 start; start 453 drivers/edac/fsl_ddr_edac.c start = (cs_bnds & 0xffff0000) >> 16; start 456 drivers/edac/fsl_ddr_edac.c if (start == end) start 459 
drivers/edac/fsl_ddr_edac.c start <<= (24 - PAGE_SHIFT); start 463 drivers/edac/fsl_ddr_edac.c csrow->first_page = start; start 466 drivers/edac/fsl_ddr_edac.c dimm->nr_pages = end + 1 - start; start 522 drivers/edac/fsl_ddr_edac.c if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r), start 530 drivers/edac/fsl_ddr_edac.c pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); start 74 drivers/edac/highbank_l2_edac.c if (!devm_request_mem_region(&pdev->dev, r->start, start 81 drivers/edac/highbank_l2_edac.c drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 187 drivers/edac/highbank_mc_edac.c if (!devm_request_mem_region(&pdev->dev, r->start, start 194 drivers/edac/highbank_mc_edac.c base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 206 drivers/edac/mpc85xx_edac.c r.start += 0xe00; start 208 drivers/edac/mpc85xx_edac.c if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r), start 215 drivers/edac/mpc85xx_edac.c pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); start 521 drivers/edac/mpc85xx_edac.c r.start += 0xe00; start 523 drivers/edac/mpc85xx_edac.c if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r), start 530 drivers/edac/mpc85xx_edac.c pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); start 92 drivers/edac/mv64x60_edac.c pci_serr = ioremap(r->start, resource_size(r)); start 140 drivers/edac/mv64x60_edac.c r->start, start 150 drivers/edac/mv64x60_edac.c r->start, start 305 drivers/edac/mv64x60_edac.c r->start, start 315 drivers/edac/mv64x60_edac.c r->start, start 472 drivers/edac/mv64x60_edac.c r->start, start 482 drivers/edac/mv64x60_edac.c r->start, start 499 drivers/edac/mv64x60_edac.c r->start, start 509 drivers/edac/mv64x60_edac.c r->start, start 738 drivers/edac/mv64x60_edac.c r->start, start 748 drivers/edac/mv64x60_edac.c r->start, start 117 drivers/edac/sb_edac.c unsigned char start; start 146 drivers/edac/sb_edac.c return GET_BITFIELD(reg, table[interleave].start, start 274 drivers/eisa/eisa-bus.c edev->res[i].start = edev->res[i].end = 0; start 280 drivers/eisa/eisa-bus.c edev->res[i].start = SLOT_ADDRESS(root, slot) start 282 drivers/eisa/eisa-bus.c edev->res[i].end = edev->res[i].start + 0xff; start 286 drivers/eisa/eisa-bus.c edev->res[i].start = SLOT_ADDRESS(root, slot) start 288 drivers/eisa/eisa-bus.c edev->res[i].end = edev->res[i].start + 3; start 310 drivers/eisa/eisa-bus.c if (edev->res[i].start || edev->res[i].end) start 407 drivers/eisa/eisa-bus.c .start = 0, start 424 drivers/eisa/eisa-bus.c root->eisa_root_res.start = root->res->start; start 54 drivers/eisa/pci_eisa.c pci_eisa_root.bus_base_addr = bus_res->start; start 772 drivers/firewire/core-cdev.c region.start = a->offset; start 521 drivers/firewire/core-transaction.c { .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, }; start 525 drivers/firewire/core-transaction.c { .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, }; start 529 drivers/firewire/core-transaction.c { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; start 531 drivers/firewire/core-transaction.c { .start = CSR_REGISTER_BASE, start 534 drivers/firewire/core-transaction.c { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; start 568 drivers/firewire/core-transaction.c if (region->start & 0xffff000000000003ULL || start 569 drivers/firewire/core-transaction.c region->start >= region->end || start 577 drivers/firewire/core-transaction.c handler->offset = region->start; start 1036 
drivers/firewire/core-transaction.c { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, start 1044 drivers/firewire/core-transaction.c int start; start 1056 drivers/firewire/core-transaction.c start = (offset - topology_map_region.start) / 4; start 1057 drivers/firewire/core-transaction.c memcpy(payload, &card->topology_map[start], length); start 1068 drivers/firewire/core-transaction.c { .start = CSR_REGISTER_BASE, start 767 drivers/firmware/arm_scmi/driver.c cinfo->payload = devm_ioremap(info->dev, res.start, size); start 944 drivers/firmware/arm_scpi.c pchan->rx_payload = devm_ioremap(dev, res.start, size); start 52 drivers/firmware/efi/earlycon.c static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) start 57 drivers/firmware/efi/earlycon.c return efi_fb + start; start 60 drivers/firmware/efi/earlycon.c return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); start 983 drivers/firmware/efi/efi.c res->start = addr; start 32 drivers/firmware/efi/fake_mem.c if (m1->range.start < m2->range.start) start 34 drivers/firmware/efi/fake_mem.c if (m1->range.start > m2->range.start) start 86 drivers/firmware/efi/fake_mem.c u64 start = 0, mem_size = 0, attribute = 0; start 95 drivers/firmware/efi/fake_mem.c start = memparse(p+1, &p); start 107 drivers/firmware/efi/fake_mem.c fake_mems[nr_fake_mem].range.start = start; start 108 drivers/firmware/efi/fake_mem.c fake_mems[nr_fake_mem].range.end = start + mem_size - 1; start 121 drivers/firmware/efi/fake_mem.c fake_mems[i].attribute, fake_mems[i].range.start, start 131 drivers/firmware/efi/libstub/arm32-stub.c u64 start, end; start 134 drivers/firmware/efi/libstub/arm32-stub.c start = desc->phys_addr; start 135 drivers/firmware/efi/libstub/arm32-stub.c end = start + desc->num_pages * EFI_PAGE_SIZE; start 138 drivers/firmware/efi/libstub/arm32-stub.c if (start >= dram_base + MAX_UNCOMP_KERNEL_SIZE || start 153 drivers/firmware/efi/libstub/arm32-stub.c start = max(start, (u64)dram_base); start 159 drivers/firmware/efi/libstub/arm32-stub.c (end - start) / EFI_PAGE_SIZE, start 160 drivers/firmware/efi/libstub/arm32-stub.c &start); start 208 drivers/firmware/efi/libstub/efi-stub-helper.c u64 start, end; start 217 drivers/firmware/efi/libstub/efi-stub-helper.c start = desc->phys_addr; start 218 drivers/firmware/efi/libstub/efi-stub-helper.c end = start + desc->num_pages * EFI_PAGE_SIZE; start 223 drivers/firmware/efi/libstub/efi-stub-helper.c if ((start + size) > end) start 226 drivers/firmware/efi/libstub/efi-stub-helper.c if (round_down(end - size, align) < start) start 229 drivers/firmware/efi/libstub/efi-stub-helper.c start = round_down(end - size, align); start 235 drivers/firmware/efi/libstub/efi-stub-helper.c if (start == 0x0) start 238 drivers/firmware/efi/libstub/efi-stub-helper.c if (start > max_addr) start 239 drivers/firmware/efi/libstub/efi-stub-helper.c max_addr = start; start 301 drivers/firmware/efi/libstub/efi-stub-helper.c u64 start, end; start 311 drivers/firmware/efi/libstub/efi-stub-helper.c start = desc->phys_addr; start 312 drivers/firmware/efi/libstub/efi-stub-helper.c end = start + desc->num_pages * EFI_PAGE_SIZE; start 314 drivers/firmware/efi/libstub/efi-stub-helper.c if (start < min) start 315 drivers/firmware/efi/libstub/efi-stub-helper.c start = min; start 317 drivers/firmware/efi/libstub/efi-stub-helper.c start = round_up(start, align); start 318 drivers/firmware/efi/libstub/efi-stub-helper.c if ((start + size) > end) start 323 drivers/firmware/efi/libstub/efi-stub-helper.c nr_pages, &start); 
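The efi-stub-helper.c lines above repeat one piece of interval arithmetic: turn a memory descriptor into [start, end), clamp start to a floor, align it upward, and check whether size bytes still fit. A self-contained sketch of that check follows; desc_fits() and its argument list are invented for illustration, and only the arithmetic mirrors the listed lines.

	/* Illustrative helper; mirrors the start/round_up/fit checks above. */
	#include <stdbool.h>
	#include <stdint.h>

	#define EFI_PAGE_SIZE 4096ULL	/* assumed 4 KiB EFI page size */

	static uint64_t round_up_pow2(uint64_t x, uint64_t align)
	{
		return (x + align - 1) & ~(align - 1);	/* align must be a power of two */
	}

	static bool desc_fits(uint64_t phys_addr, uint64_t num_pages,
			      uint64_t min, uint64_t size, uint64_t align,
			      uint64_t *alloc_start)
	{
		uint64_t start = phys_addr;
		uint64_t end = start + num_pages * EFI_PAGE_SIZE;

		if (start < min)			/* never allocate below the floor */
			start = min;
		start = round_up_pow2(start, align);	/* honour the alignment request */
		if (start + size > end)			/* not enough room in this region */
			return false;

		*alloc_start = start;
		return true;
	}
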
start 325 drivers/firmware/efi/libstub/efi-stub-helper.c *addr = start; start 220 drivers/firmware/efi/memmap.c u64 start, end; start 223 drivers/firmware/efi/memmap.c start = md->phys_addr; start 224 drivers/firmware/efi/memmap.c end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; start 227 drivers/firmware/efi/memmap.c m_start = range->start; start 230 drivers/firmware/efi/memmap.c if (m_start <= start) { start 232 drivers/firmware/efi/memmap.c if (start < m_end && m_end < end) start 236 drivers/firmware/efi/memmap.c if (start < m_start && m_start < end) { start 262 drivers/firmware/efi/memmap.c u64 start, end; start 266 drivers/firmware/efi/memmap.c m_start = mem->range.start; start 288 drivers/firmware/efi/memmap.c start = md->phys_addr; start 291 drivers/firmware/efi/memmap.c if (m_start <= start && end <= m_end) start 294 drivers/firmware/efi/memmap.c if (m_start <= start && start 295 drivers/firmware/efi/memmap.c (start < m_end && m_end < end)) { start 309 drivers/firmware/efi/memmap.c if ((start < m_start && m_start < end) && m_end < end) { start 330 drivers/firmware/efi/memmap.c if ((start < m_start && m_start < end) && start 7 drivers/firmware/efi/tpm.c #define TPM_MEMREMAP(start, size) early_memremap(start, size) start 8 drivers/firmware/efi/tpm.c #define TPM_MEMUNMAP(start, size) early_memunmap(start, size) start 135 drivers/firmware/google/coreboot_table.c if (!res->start || !len) start 139 drivers/firmware/google/coreboot_table.c header = memremap(res->start, sizeof(*header), MEMREMAP_WB); start 151 drivers/firmware/google/coreboot_table.c ptr = memremap(res->start, len, MEMREMAP_WB); start 57 drivers/firmware/google/framebuffer-coreboot.c res.start = fb->physical_address; start 59 drivers/firmware/google/framebuffer-coreboot.c res.end = res.start + length - 1; start 60 drivers/firmware/google/framebuffer-coreboot.c if (res.end <= res.start) start 85 drivers/firmware/google/gsmi.c u8 *start; /* start of buffer */ start 159 drivers/firmware/google/gsmi.c smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL, start 161 drivers/firmware/google/gsmi.c if (!smibuf->start) { start 169 drivers/firmware/google/gsmi.c smibuf->address = (u32)virt_to_phys(smibuf->start); start 177 drivers/firmware/google/gsmi.c if (smibuf->start) start 178 drivers/firmware/google/gsmi.c dma_pool_free(gsmi_dev.dma_pool, smibuf->start, start 332 drivers/firmware/google/gsmi.c memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length); start 333 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.name_buf->start, name, name_len * 2); start 336 drivers/firmware/google/gsmi.c memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); start 339 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 340 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.param_buf->start, &param, sizeof(param)); start 351 drivers/firmware/google/gsmi.c memcpy(&param, gsmi_dev.param_buf->start, sizeof(param)); start 359 drivers/firmware/google/gsmi.c memcpy(data, gsmi_dev.data_buf->start, *data_size); start 398 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.name_buf->start, name, *name_size); start 401 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 402 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.param_buf->start, &param, sizeof(param)); start 413 drivers/firmware/google/gsmi.c memcpy(&param, gsmi_dev.param_buf->start, sizeof(param)); start 416 drivers/firmware/google/gsmi.c memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
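Earlier in this run, the drivers/firmware/efi/memmap.c hits compare a carve-out [m_start, m_end] against each descriptor's [start, end] to decide how many extra descriptors a split will require. The sketch below is an illustrative reconstruction of that case analysis, not the kernel's actual helper; split_count() and its contract are assumptions made for the example.

	/* Counts the extra descriptors needed when [m_start, m_end] is carved
	 * out of [start, end]; all bounds inclusive.  Illustration only.
	 */
	#include <stdint.h>

	static int split_count(uint64_t start, uint64_t end,
			       uint64_t m_start, uint64_t m_end)
	{
		int extra = 0;

		if (m_start <= start && start < m_end && m_end < end)
			extra++;		/* carve-out clips the front: one tail piece remains */

		if (start < m_start && m_start < end) {
			extra++;		/* carve-out starts inside: a head piece remains */
			if (m_end < end)
				extra++;	/* and ends inside too: a tail piece as well */
		}

		return extra;			/* 0, 1 or 2 additional descriptors */
	}

The later memmap.c hits (lines 262 onwards) appear to be the insertion pass that uses the same bound comparisons to actually rewrite the map once space for the extra descriptors has been set aside.
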
start 457 drivers/firmware/google/gsmi.c memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length); start 458 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.name_buf->start, name, name_len * 2); start 461 drivers/firmware/google/gsmi.c memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); start 462 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.data_buf->start, data, data_size); start 465 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 466 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.param_buf->start, &param, sizeof(param)); start 511 drivers/firmware/google/gsmi.c memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); start 512 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.data_buf->start, buf, param.data_len); start 515 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 516 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.param_buf->start, &param, sizeof(param)); start 563 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 564 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.param_buf->start, &param, sizeof(param)); start 590 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 635 drivers/firmware/google/gsmi.c memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); start 636 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry)); start 640 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 641 drivers/firmware/google/gsmi.c memcpy(gsmi_dev.param_buf->start, &param, sizeof(param)); start 810 drivers/firmware/google/gsmi.c memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); start 28 drivers/firmware/google/memconsole-x86-legacy.c u16 start; start 37 drivers/firmware/google/memconsole-x86-legacy.c u16 start; start 57 drivers/firmware/google/memconsole-x86-legacy.c hdr->v1.buffer_addr, hdr->v1.start, start 70 drivers/firmware/google/memconsole-x86-legacy.c hdr->v2.buffer_addr, hdr->v2.start, start 73 drivers/firmware/google/memconsole-x86-legacy.c memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start); start 74 drivers/firmware/google/memconsole-x86-legacy.c memconsole_length = hdr->v2.end - hdr->v2.start; start 31 drivers/firmware/memmap.c u64 start; /* start of the memory range */ start 48 drivers/firmware/memmap.c firmware_map_find_entry(u64 start, u64 end, const char *type); start 59 drivers/firmware/memmap.c static struct memmap_attribute memmap_start_attr = __ATTR_RO(start); start 141 drivers/firmware/memmap.c static int firmware_map_add_entry(u64 start, u64 end, start 145 drivers/firmware/memmap.c BUG_ON(start > end); start 147 drivers/firmware/memmap.c entry->start = start; start 218 drivers/firmware/memmap.c firmware_map_find_entry_in_list(u64 start, u64 end, const char *type, start 224 drivers/firmware/memmap.c if ((entry->start == start) && (entry->end == end) && start 245 drivers/firmware/memmap.c firmware_map_find_entry(u64 start, u64 end, const char *type) start 247 drivers/firmware/memmap.c return firmware_map_find_entry_in_list(start, end, type, &map_entries); start 262 drivers/firmware/memmap.c firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type) start 264 drivers/firmware/memmap.c return firmware_map_find_entry_in_list(start, end, type, start 281 drivers/firmware/memmap.c int __meminit firmware_map_add_hotplug(u64 start,
u64 end, const char *type) start 285 drivers/firmware/memmap.c entry = firmware_map_find_entry(start, end - 1, type); start 289 drivers/firmware/memmap.c entry = firmware_map_find_entry_bootmem(start, end - 1, type); start 303 drivers/firmware/memmap.c firmware_map_add_entry(start, end, type, entry); start 323 drivers/firmware/memmap.c int __init firmware_map_add_early(u64 start, u64 end, const char *type) start 332 drivers/firmware/memmap.c return firmware_map_add_entry(start, end, type, entry); start 345 drivers/firmware/memmap.c int __meminit firmware_map_remove(u64 start, u64 end, const char *type) start 350 drivers/firmware/memmap.c entry = firmware_map_find_entry(start, end - 1, type); start 372 drivers/firmware/memmap.c (unsigned long long)entry->start); start 403 drivers/firmware/qcom_scm.c *addr = res.start + offset; start 245 drivers/firmware/qemu_fw_cfg.c fw_cfg_p_base = range->start; start 273 drivers/firmware/qemu_fw_cfg.c fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start; start 274 drivers/firmware/qemu_fw_cfg.c fw_cfg_reg_data = fw_cfg_dev_base + data->start; start 282 drivers/firmware/qemu_fw_cfg.c fw_cfg_reg_dma = fw_cfg_dev_base + dma->start; start 835 drivers/firmware/qemu_fw_cfg.c res[0].start = base; start 843 drivers/firmware/qemu_fw_cfg.c res[1].start = ctrl_off; start 846 drivers/firmware/qemu_fw_cfg.c res[2].start = data_off; start 851 drivers/firmware/qemu_fw_cfg.c res[3].start = dma_off; start 879 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[0].start); start 883 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[0].start, start 884 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[1].start, start 885 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[2].start); start 889 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[0].start, start 890 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[1].start, start 891 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[2].start, start 892 drivers/firmware/qemu_fw_cfg.c fw_cfg_cmdline_dev->resource[3].start); start 164 drivers/firmware/tegra/bpmp.c ktime_t start, now; start 166 drivers/firmware/tegra/bpmp.c start = ns_to_ktime(local_clock()); start 173 drivers/firmware/tegra/bpmp.c } while (ktime_us_delta(now, start) < timeout); start 548 drivers/firmware/tegra/bpmp.c ktime_t start, end; start 564 drivers/firmware/tegra/bpmp.c start = ktime_get(); start 573 drivers/firmware/tegra/bpmp.c ktime_to_us(ktime_sub(end, start))); start 3215 drivers/firmware/ti_sci.c return res->desc[set].start + free_bit; start 3236 drivers/firmware/ti_sci.c if (res->desc[set].start <= id && start 3237 drivers/firmware/ti_sci.c (res->desc[set].num + res->desc[set].start) > id) start 3238 drivers/firmware/ti_sci.c clear_bit(id - res->desc[set].start, start 3306 drivers/firmware/ti_sci.c &res->desc[i].start, start 3311 drivers/firmware/ti_sci.c res->desc[i].start = 0; start 3317 drivers/firmware/ti_sci.c dev_id, resource_subtype, res->desc[i].start, start 497 drivers/fpga/dfl-afu-main.c resource_size(res), res->start, start 518 drivers/fpga/dfl-afu-main.c resource_size(res), res->start, start 89 drivers/fpga/dfl-pci.c resource_size_t start, len; start 113 drivers/fpga/dfl-pci.c start = pci_resource_start(pcidev, 0); start 116 drivers/fpga/dfl-pci.c dfl_fpga_enum_info_add_dfl(info, start, len, base); start 144 drivers/fpga/dfl-pci.c start = pci_resource_start(pcidev, bar) + offset; start 147 drivers/fpga/dfl-pci.c dfl_fpga_enum_info_add_dfl(info, start, len, start 151 
drivers/fpga/dfl-pci.c start = pci_resource_start(pcidev, 0); start 154 drivers/fpga/dfl-pci.c dfl_fpga_enum_info_add_dfl(info, start, len, base); start 616 drivers/fpga/dfl.c static inline u32 feature_size(void __iomem *start) start 618 drivers/fpga/dfl.c u64 v = readq(start + DFH); start 624 drivers/fpga/dfl.c static u64 feature_id(void __iomem *start) start 626 drivers/fpga/dfl.c u64 v = readq(start + DFH); start 667 drivers/fpga/dfl.c finfo->mmio_res.start = dfl->start + ofst; start 668 drivers/fpga/dfl.c finfo->mmio_res.end = finfo->mmio_res.start + size - 1; start 792 drivers/fpga/dfl.c void __iomem *start = dfl->ioaddr; start 799 drivers/fpga/dfl.c for (; start < end; start += ofst) { start 800 drivers/fpga/dfl.c if (end - start < DFH_SIZE) { start 805 drivers/fpga/dfl.c ret = parse_feature(binfo, dfl, start - dfl->ioaddr); start 809 drivers/fpga/dfl.c v = readq(start + DFH); start 876 drivers/fpga/dfl.c resource_size_t start, resource_size_t len, start 885 drivers/fpga/dfl.c dfl->start = start; start 387 drivers/fpga/dfl.h resource_size_t start; start 397 drivers/fpga/dfl.h resource_size_t start, resource_size_t len, start 22 drivers/fpga/fpga-region.c struct device *start, const void *data, start 27 drivers/fpga/fpga-region.c dev = class_find_device(fpga_region_class, start, data, match); start 1301 drivers/fsi/fsi-master-ast-cf.c master->cf_mem_addr = (uint32_t)res.start; start 417 drivers/fsi/fsi-occ.c unsigned long start; start 441 drivers/fsi/fsi-occ.c start = jiffies; start 451 drivers/fsi/fsi-occ.c if (time_after(jiffies, start + timeout)) { start 262 drivers/gpio/gpio-ath79.c ctrl->base = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 323 drivers/gpio/gpio-cs5535.c if (!devm_request_region(&pdev->dev, res->start, resource_size(res), start 330 drivers/gpio/gpio-cs5535.c cs5535_gpio_chip.base = res->start; start 298 drivers/gpio/gpio-em.c p->base0 = devm_ioremap_nocache(dev, io[0]->start, start 303 drivers/gpio/gpio-em.c p->base1 = devm_ioremap_nocache(dev, io[1]->start, start 349 drivers/gpio/gpio-em.c if (devm_request_irq(dev, irq[0]->start, start 355 drivers/gpio/gpio-em.c if (devm_request_irq(dev, irq[1]->start, start 281 drivers/gpio/gpio-htc-egpio.c ei->chained_irq = res->start; start 287 drivers/gpio/gpio-htc-egpio.c ei->base_addr = devm_ioremap_nocache(&pdev->dev, res->start, start 291 drivers/gpio/gpio-htc-egpio.c pr_debug("EGPIO phys=%08x virt=%p\n", (u32)res->start, ei->base_addr); start 56 drivers/gpio/gpio-ich.c #define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start) start 57 drivers/gpio/gpio-ich.c #define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start) start 370 drivers/gpio/gpio-ich.c if (!res_base || !res_base->start || !res_base->end) start 377 drivers/gpio/gpio-ich.c res_base->start + ichx_priv.desc->regs[0][i], start 449 drivers/gpio/gpio-ich.c if (!devm_request_region(dev, res_pm->start, resource_size(res_pm), start 232 drivers/gpio/gpio-ixp4xx.c g->fwnode = irq_domain_alloc_fwnode(&res->start); start 345 drivers/gpio/gpio-lynxpoint.c lg->reg_base = io_rc->start; start 371 drivers/gpio/gpio-lynxpoint.c if (irq_rc && irq_rc->start) { start 384 drivers/gpio/gpio-lynxpoint.c girq->parents[0] = (unsigned)irq_rc->start; start 148 drivers/gpio/gpio-menz127.c men_z127_gpio->reg_base = ioremap(men_z127_gpio->mem->start, start 507 drivers/gpio/gpio-rcar.c p->irq_parent = irq->start; start 508 drivers/gpio/gpio-rcar.c if (devm_request_irq(dev, irq->start, gpio_rcar_irq_handler, start 143 drivers/gpio/gpio-rdc321x.c 
rdc321x_gpio_dev->reg1_ctrl_base = r->start; start 144 drivers/gpio/gpio-rdc321x.c rdc321x_gpio_dev->reg1_data_base = r->start + 0x4; start 152 drivers/gpio/gpio-rdc321x.c rdc321x_gpio_dev->reg2_ctrl_base = r->start; start 153 drivers/gpio/gpio-rdc321x.c rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; start 156 drivers/gpio/gpio-sch.c if (!devm_request_region(&pdev->dev, res->start, resource_size(res), start 161 drivers/gpio/gpio-sch.c sch->iobase = res->start; start 72 drivers/gpio/gpio-tegra186.c unsigned int start = 0, i; start 77 drivers/gpio/gpio-tegra186.c if (*pin >= start && *pin < start + port->pins) { start 78 drivers/gpio/gpio-tegra186.c *pin -= start; start 82 drivers/gpio/gpio-tegra186.c start += port->pins; start 248 drivers/gpio/gpio-tqmx86.c io_base = devm_ioport_map(&pdev->dev, res->start, resource_size(res)); start 335 drivers/gpio/gpio-ts5500.c priv->hwirq = res->start; start 246 drivers/gpio/gpio-vx855.c vg->io_gpi = res_gpi->start; start 247 drivers/gpio/gpio-vx855.c vg->io_gpo = res_gpo->start; start 258 drivers/gpio/gpio-vx855.c if (!devm_request_region(&pdev->dev, res_gpi->start, start 263 drivers/gpio/gpio-vx855.c if (!devm_request_region(&pdev->dev, res_gpo->start, start 174 drivers/gpio/gpio-xgene.c gpio->base = devm_ioremap_nocache(&pdev->dev, res->start, start 774 drivers/gpio/gpiolib-of.c u32 start, count; start 783 drivers/gpio/gpiolib-of.c i, &start); start 786 drivers/gpio/gpiolib-of.c if (start >= chip->ngpio || start + count >= chip->ngpio) start 789 drivers/gpio/gpiolib-of.c bitmap_clear(chip->valid_mask, start, count); start 5067 drivers/gpio/gpiolib.c .start = gpiolib_seq_start, start 295 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c adev->acp.acp_res[0].start = acp_base; start 300 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START; start 305 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; start 310 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START; start 315 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162); start 316 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; start 1404 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c u8 *start = (u8*)v3; start 1407 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); start 834 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c offset = m->start * AMDGPU_GPU_PAGE_SIZE; start 773 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c res->start > 0x100000000ull) start 3409 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || start 193 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h uint32_t wave, uint32_t thread, uint32_t start, start 196 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h uint32_t wave, uint32_t start, uint32_t size, start 92 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c uint64_t start, size; start 99 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; start 100 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; start 101 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c drm_mm_init(&mgr->mm, start, size); start 155 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c return (node->node.start != AMDGPU_BO_INVALID_OFFSET); start 204 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c mem->start = 
node->node.start; start 243 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c node->node.start = AMDGPU_BO_INVALID_OFFSET; start 257 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c mem->start = node->node.start; start 287 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c if (node->node.start != AMDGPU_BO_INVALID_OFFSET) start 171 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c unsigned long start, start 179 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) start 203 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c unsigned long start = update->start; start 217 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c it = interval_tree_iter_first(&amn->objects, start, end); start 227 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c it = interval_tree_iter_next(it, start, end); start 229 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c amdgpu_mn_invalidate_node(node, start, end); start 252 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c unsigned long start = update->start; start 263 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c it = interval_tree_iter_first(&amn->objects, start, end); start 274 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c it = interval_tree_iter_next(it, start, end); start 280 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c start, end)) start 402 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c addr = min(it->start, addr); start 414 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c node->it.start = addr; start 583 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT) start 1350 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c offset = bo->mem.start << PAGE_SHIFT; start 1371 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c offset = bo->mem.start << PAGE_SHIFT; start 1438 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); start 52 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h uint64_t start; start 212 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h if (node->start < fpfn) start 219 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c .start = amdgpu_perf_start, start 762 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c unsigned int start = div64_ul(ppos + element_size - 1, element_size); start 773 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c for (; start < end && start < bps_count; start++) start 776 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c bps[start].bp, start 777 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c bps[start].size, start 778 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c amdgpu_ras_badpage_flags_str(bps[start].flags)); start 169 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void (*emit_tmz)(struct amdgpu_ring *ring, bool start); start 128 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h void (*start)(struct amdgpu_device *adev); start 249 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __field(long, start) start 257 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->start = mapping->start; start 263 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->bo, __entry->start, __entry->last, start 273 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __field(long, start) start 281 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->start = mapping->start; start 287 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->bo, __entry->start, __entry->last, start 301 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->soffset = mapping->start; start 267 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) { start 268 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c addr = mm_node->start << PAGE_SHIFT; start 355 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) { start 368 
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) { start 608 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c return ((nodes->start + nodes->size) << PAGE_SHIFT) start 725 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c mem->bus.offset = mem->start << PAGE_SHIFT; start 758 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + start 793 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c unsigned long start = gtt->userptr; start 827 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c range->start = start; start 828 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c range->end = start + ttm->num_pages * PAGE_SIZE; start 840 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c vma = find_vma(mm, start); start 841 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (unlikely(!vma || start < vma->vm_start)) { start 1081 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; start 1105 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) start 1110 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c bo->mem.start = addr >> PAGE_SHIFT; start 1133 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c gtt->offset = (u64)tmp.start << PAGE_SHIFT; start 1144 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c bo->offset = (bo->mem.start << PAGE_SHIFT) + start 1373 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, start 1386 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (gtt->userptr > end || gtt->userptr + size <= start) start 1511 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (place->fpfn < (node->start + node->size) && start 1512 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c !(place->lpfn && place->lpfn <= node->start)) start 1555 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c pos = (nodes->start << PAGE_SHIFT) + offset; start 1588 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { start 1590 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c pos = (nodes->start << PAGE_SHIFT); start 126 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, start 810 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c uint64_t start, end; start 820 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c start = amdgpu_bo_gpu_offset(bo); start 822 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c end = (mapping->last + 1 - mapping->start); start 823 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c end = end * AMDGPU_GPU_PAGE_SIZE + start; start 825 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE; start 826 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c start += addr; start 829 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c lower_32_bits(start)); start 831 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c upper_32_bits(start)); start 835 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c if ((end - start) < ctx->buf_sizes[cmd]) { start 837 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (unsigned)(end - start), start 843 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c if ((end - start) < ctx->buf_sizes[4]) { start 845 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (unsigned)(end - start), start 855 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c if ((start >> 28) != ((end - 1) >> 28)) { start 857 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c start, end); start 862 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { start 864 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c start, end); start 653 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c addr -= mapping->start * 
AMDGPU_GPU_PAGE_SIZE; start 60 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c #define START(node) ((node)->start) start 365 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_vm *vm, uint64_t start, start 368 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c cursor->pfn = start; start 491 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_vm_pt_cursor *start, start 494 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (start) start 495 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c *cursor = *start; start 510 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start, start 513 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c return entry && (!start || entry != start->entry); start 541 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ start 542 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \ start 544 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_pt_continue_dfs((start), (entry)); \ start 926 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_vm_pt_cursor *start) start 933 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) start 936 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (start) start 937 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_free_table(start->entry); start 1318 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t start, uint64_t end, uint64_t flags, start 1357 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); start 1362 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c *frag_end = start + (1 << *frag); start 1381 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t start, uint64_t end, start 1386 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t frag_start = start, frag_end; start 1394 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_pt_start(adev, params->vm, start, &cursor); start 1509 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t start, uint64_t last, start 1530 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags); start 1567 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t pfn, start = mapping->start; start 1616 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c addr = nodes->start << PAGE_SHIFT; start 1650 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c last = min((uint64_t)mapping->last, start + max_entries - 1); start 1652 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c start, last, flags, addr, start 1657 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE; start 1662 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c start = last + 1; start 1664 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c } while (unlikely(start != mapping->last + 1)); start 1944 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start < AMDGPU_GMC_HOLE_START) start 1948 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start, mapping->last, start 2144 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c tmp->start, tmp->last + 1); start 2152 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start = saddr; start 2215 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start = saddr; start 2250 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->start == saddr) start 2258 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->start == saddr) start 2322 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (tmp->start < saddr) { start 2323 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c before->start = tmp->start; start 2333 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c after->start = eaddr + 
1; start 2336 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c after->offset += after->start - tmp->start; start 2353 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (tmp->start < saddr) start 2354 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c tmp->start = saddr; start 197 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c uint64_t start = node->start << PAGE_SHIFT; start 198 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c uint64_t end = (node->size + node->start) << PAGE_SHIFT; start 200 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c if (start >= adev->gmc.visible_vram_size) start 204 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c adev->gmc.visible_vram_size : end) - start; start 226 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) start 247 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c unsigned long start; start 249 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c start = node->start + node->size; start 250 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c if (start > mem->num_pages) start 251 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c start -= mem->num_pages; start 253 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c start = 0; start 254 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c mem->start = max(mem->start, start); start 319 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c mem->start = 0; start 61 drivers/gpu/drm/amd/amdgpu/atom.c uint16_t start; start 743 drivers/gpu/drm/amd/amdgpu/atom.c if (ctx->last_jump == (ctx->start + target)) { start 756 drivers/gpu/drm/amd/amdgpu/atom.c ctx->last_jump = ctx->start + target; start 759 drivers/gpu/drm/amd/amdgpu/atom.c *ptr = ctx->start + target; start 870 drivers/gpu/drm/amd/amdgpu/atom.c ctx->ctx->data_block = ctx->start; start 1017 drivers/gpu/drm/amd/amdgpu/atom.c *ptr = ctx->start + target; start 1221 drivers/gpu/drm/amd/amdgpu/atom.c ectx.start = base; start 255 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start); start 1158 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c uint32_t wave, uint32_t start, start 1164 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, start 1170 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c uint32_t start, uint32_t size, start 1175 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c start + SQIND_WAVE_VGPRS_OFFSET, size, dst); start 1968 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c adev->gfx.rlc.funcs->start(adev); start 4282 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c .start = gfx_v10_0_rlc_start start 4784 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) start 4787 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c amdgpu_ring_write(ring, FRAME_CMD(start ? 
0 : 1)); /* frame_end */ start 2562 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c adev->gfx.rlc.funcs->start(adev); start 3037 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c uint32_t wave, uint32_t start, start 3042 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c start + SQIND_WAVE_SGPRS_OFFSET, size, dst); start 3064 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c .start = gfx_v6_0_rlc_start start 3568 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c adev->gfx.rlc.funcs->start(adev); start 4191 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c uint32_t wave, uint32_t start, start 4196 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c start + SQIND_WAVE_SGPRS_OFFSET, size, dst); start 4224 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c .start = gfx_v7_0_rlc_start start 4140 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c adev->gfx.rlc.funcs->start(adev); start 5177 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c adev->gfx.rlc.funcs->start(adev); start 5291 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c uint32_t wave, uint32_t start, start 5296 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c start + SQIND_WAVE_SGPRS_OFFSET, size, dst); start 5639 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c .start = gfx_v8_0_rlc_start start 1822 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c uint32_t wave, uint32_t start, start 1827 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c start + SQIND_WAVE_SGPRS_OFFSET, size, dst); start 1832 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c uint32_t start, uint32_t size, start 1837 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c start + SQIND_WAVE_VGPRS_OFFSET, size, dst); start 3059 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c adev->gfx.rlc.funcs->start(adev); start 4830 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c .start = gfx_v9_0_rlc_start start 5340 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) start 5343 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c amdgpu_ring_write(ring, FRAME_CMD(start ? 
0 : 1)); /* frame_end */ start 836 drivers/gpu/drm/amd/amdkfd/kfd_device.c err = kfd->dqm->ops.start(kfd->dqm); start 1726 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->ops.start = start_cpsch; start 1744 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->ops.start = start_nocpsch; start 103 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h int (*start)(struct device_queue_manager *dqm); start 224 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c uint8_t *start = (uint8_t *)voltage_object_info_table; start 228 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); start 576 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c uint8_t *start = (uint8_t *)gpio_lookup_table; start 580 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c (const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset); start 37 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c unsigned long start = (unsigned long)voltage_object_info_table; start 41 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c (const union atom_voltage_object_v4 *)(start + offset); start 187 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c unsigned long start = (unsigned long)gpio_lookup_table; start 191 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c (const struct atom_gpio_pin_assignment *)(start + offset); start 31 drivers/gpu/drm/arm/display/include/malidp_utils.h u32 start; start 35 drivers/gpu/drm/arm/display/include/malidp_utils.h static inline void set_range(struct malidp_range *rg, u32 start, u32 end) start 37 drivers/gpu/drm/arm/display/include/malidp_utils.h rg->start = start; start 43 drivers/gpu/drm/arm/display/include/malidp_utils.h return (v >= rg->start) && (v <= rg->end); start 94 drivers/gpu/drm/arm/malidp_crtc.c u16 start; start 133 drivers/gpu/drm/arm/malidp_crtc.c delta_in = segments[i].end - segments[i].start; start 135 drivers/gpu/drm/arm/malidp_crtc.c out_start = drm_color_lut_extract(lut[segments[i].start].green, start 85 drivers/gpu/drm/armada/armada_drv.c if (!devm_request_mem_region(dev, mem->start, resource_size(mem), start 136 drivers/gpu/drm/armada/armada_drv.c drm_mm_init(&priv->linear, mem->start, resource_size(mem)); start 153 drivers/gpu/drm/armada/armada_gem.c ptr = ioremap_wc(obj->linear->start, size); start 166 drivers/gpu/drm/armada/armada_gem.c obj->phys_addr = obj->linear->start; start 167 drivers/gpu/drm/armada/armada_gem.c obj->dev_addr = obj->linear->start; start 77 drivers/gpu/drm/bochs/bochs_hw.c size_t i, start = block * EDID_LENGTH; start 79 drivers/gpu/drm/bochs/bochs_hw.c if (start + len > 0x400 /* vga register offset */) start 83 drivers/gpu/drm/bochs/bochs_hw.c buf[i] = readb(bochs->mmio + start + i); start 71 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c unsigned char start = 0x00; start 80 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c .buf = &start, start 112 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c start = 0x00; start 238 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c u32 start, stop; start 246 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c start = dw->buf_addr + offset; start 247 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c stop = start + period - 1; start 250 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0); start 2821 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c audio.phys = iores->start; start 27 drivers/gpu/drm/bridge/tc358764.c #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) start 28 
drivers/gpu/drm/bridge/tc358764.c #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) start 1586 drivers/gpu/drm/drm_atomic_helper.c ktime_t start; start 1602 drivers/gpu/drm/drm_atomic_helper.c start = ktime_get(); start 1622 drivers/gpu/drm/drm_atomic_helper.c commit_time_ms = ktime_ms_delta(ktime_get(), start); start 278 drivers/gpu/drm/drm_bufs.c map->offset += dev->hose->mem_space->start; start 1505 drivers/gpu/drm/drm_edid.c unsigned char start = block * EDID_LENGTH; start 1528 drivers/gpu/drm/drm_edid.c .buf = &start, start 3695 drivers/gpu/drm/drm_edid.c cea_db_offsets(const u8 *cea, int *start, int *end) start 3715 drivers/gpu/drm/drm_edid.c *start = 3; start 3716 drivers/gpu/drm/drm_edid.c *end = *start + cea[2]; start 3719 drivers/gpu/drm/drm_edid.c *start = 4; start 3804 drivers/gpu/drm/drm_edid.c #define for_each_cea_db(cea, i, start, end) \ start 3805 drivers/gpu/drm/drm_edid.c for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) start 3856 drivers/gpu/drm/drm_edid.c int i, start, end; start 3858 drivers/gpu/drm/drm_edid.c if (cea_db_offsets(cea, &start, &end)) start 3861 drivers/gpu/drm/drm_edid.c for_each_cea_db(cea, i, start, end) { start 4118 drivers/gpu/drm/drm_edid.c int i, start, end; start 4120 drivers/gpu/drm/drm_edid.c if (cea_db_offsets(cea, &start, &end)) { start 4121 drivers/gpu/drm/drm_edid.c start = 0; start 4125 drivers/gpu/drm/drm_edid.c for_each_cea_db(cea, i, start, end) { start 4184 drivers/gpu/drm/drm_edid.c int i, start, end, dbl; start 4198 drivers/gpu/drm/drm_edid.c if (cea_db_offsets(cea, &start, &end)) { start 4203 drivers/gpu/drm/drm_edid.c for_each_cea_db(cea, i, start, end) { start 4245 drivers/gpu/drm/drm_edid.c int i, start, end, dbl; start 4259 drivers/gpu/drm/drm_edid.c if (cea_db_offsets(cea, &start, &end)) { start 4264 drivers/gpu/drm/drm_edid.c for_each_cea_db(cea, i, start, end) { start 4584 drivers/gpu/drm/drm_edid.c int i, start, end; start 4599 drivers/gpu/drm/drm_edid.c if (cea_db_offsets(edid_ext, &start, &end)) start 4602 drivers/gpu/drm/drm_edid.c for_each_cea_db(edid_ext, i, start, end) { start 659 drivers/gpu/drm/drm_fb_helper.c unsigned long start, end, min, max; start 666 drivers/gpu/drm/drm_fb_helper.c start = page->index << PAGE_SHIFT; start 667 drivers/gpu/drm/drm_fb_helper.c end = start + PAGE_SIZE - 1; start 668 drivers/gpu/drm/drm_fb_helper.c min = min(min, start); start 926 drivers/gpu/drm/drm_fb_helper.c if (cmap->start + cmap->len > 16) start 947 drivers/gpu/drm/drm_fb_helper.c palette[cmap->start + i] = value; start 967 drivers/gpu/drm/drm_fb_helper.c if (cmap->start + cmap->len > crtc->gamma_size) start 974 drivers/gpu/drm/drm_fb_helper.c memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); start 975 drivers/gpu/drm/drm_fb_helper.c memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); start 976 drivers/gpu/drm/drm_fb_helper.c memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); start 997 drivers/gpu/drm/drm_fb_helper.c if (!size || cmap->start + cmap->len > size) start 1005 drivers/gpu/drm/drm_fb_helper.c if (cmap->start || cmap->len != size) { start 1010 drivers/gpu/drm/drm_fb_helper.c for (i = 0; i < cmap->start; i++) { start 1015 drivers/gpu/drm/drm_fb_helper.c for (i = cmap->start + cmap->len; i < size; i++) { start 1023 drivers/gpu/drm/drm_fb_helper.c lut[cmap->start + i].red = cmap->red[i]; start 1024 drivers/gpu/drm/drm_fb_helper.c lut[cmap->start + i].green = cmap->green[i]; start 1025 
drivers/gpu/drm/drm_fb_helper.c lut[cmap->start + i].blue = cmap->blue[i]; start 1091 drivers/gpu/drm/drm_fb_helper.c memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); start 1092 drivers/gpu/drm/drm_fb_helper.c memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); start 1093 drivers/gpu/drm/drm_fb_helper.c memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); start 72 drivers/gpu/drm/drm_memory.c offset -= dev->hose->mem_space->start; start 109 drivers/gpu/drm/drm_memory.c int drm_bind_agp(struct agp_memory *handle, unsigned int start) start 111 drivers/gpu/drm/drm_memory.c return agp_bind_memory(handle, start); start 920 drivers/gpu/drm/drm_mipi_dsi.c int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, start 923 drivers/gpu/drm/drm_mipi_dsi.c u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff }; start 944 drivers/gpu/drm/drm_mipi_dsi.c int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, start 947 drivers/gpu/drm/drm_mipi_dsi.c u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff }; start 132 drivers/gpu/drm/drm_mm.c node->start, node->size); start 139 drivers/gpu/drm/drm_mm.c node->start, node->size, buf); start 152 drivers/gpu/drm/drm_mm.c #define START(node) ((node)->start) start 153 drivers/gpu/drm/drm_mm.c #define LAST(node) ((node)->start + (node)->size - 1) start 160 drivers/gpu/drm/drm_mm.c __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) start 163 drivers/gpu/drm/drm_mm.c start, last) ?: (struct drm_mm_node *)&mm->head_node; start 202 drivers/gpu/drm/drm_mm.c if (node->start < parent->start) { start 343 drivers/gpu/drm/drm_mm.c u64 start, u64 end, u64 size, start 352 drivers/gpu/drm/drm_mm.c return find_hole(mm, start); start 402 drivers/gpu/drm/drm_mm.c u64 end = node->start + node->size; start 407 drivers/gpu/drm/drm_mm.c end = node->start + node->size; start 408 drivers/gpu/drm/drm_mm.c if (unlikely(end <= node->start)) start 412 drivers/gpu/drm/drm_mm.c hole = find_hole(mm, node->start); start 422 drivers/gpu/drm/drm_mm.c if (adj_start > node->start || adj_end < end) start 433 drivers/gpu/drm/drm_mm.c if (node->start > hole_start) start 542 drivers/gpu/drm/drm_mm.c node->start = adj_start; start 685 drivers/gpu/drm/drm_mm.c u64 start, start 689 drivers/gpu/drm/drm_mm.c DRM_MM_BUG_ON(start >= end); start 690 drivers/gpu/drm/drm_mm.c DRM_MM_BUG_ON(!size || size > end - start); start 704 drivers/gpu/drm/drm_mm.c DRM_MM_BUG_ON(end <= start); start 705 drivers/gpu/drm/drm_mm.c scan->range_start = start; start 840 drivers/gpu/drm/drm_mm.c return (node->start + node->size > scan->hit_start && start 841 drivers/gpu/drm/drm_mm.c node->start < scan->hit_end); start 907 drivers/gpu/drm/drm_mm.c void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) start 909 drivers/gpu/drm/drm_mm.c DRM_MM_BUG_ON(start + size <= start); start 922 drivers/gpu/drm/drm_mm.c mm->head_node.start = start + size; start 947 drivers/gpu/drm/drm_mm.c u64 start, size; start 951 drivers/gpu/drm/drm_mm.c start = drm_mm_hole_node_start(entry); start 953 drivers/gpu/drm/drm_mm.c start, start + size, size); start 971 drivers/gpu/drm/drm_mm.c drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start, start 972 drivers/gpu/drm/drm_mm.c entry->start + entry->size, entry->size); start 46 drivers/gpu/drm/drm_print.c if (iterator->offset < iterator->start) { start 51 drivers/gpu/drm/drm_print.c if (iterator->offset + len <= iterator->start) { start 56 drivers/gpu/drm/drm_print.c copy = len - 
(iterator->start - iterator->offset); start 63 drivers/gpu/drm/drm_print.c str + (iterator->start - iterator->offset), copy); start 65 drivers/gpu/drm/drm_print.c iterator->offset = iterator->start + copy; start 68 drivers/gpu/drm/drm_print.c ssize_t pos = iterator->offset - iterator->start; start 93 drivers/gpu/drm/drm_print.c if (iterator->offset + len <= iterator->start) { start 99 drivers/gpu/drm/drm_print.c if ((iterator->offset >= iterator->start) && (len < iterator->remain)) { start 100 drivers/gpu/drm/drm_print.c ssize_t pos = iterator->offset - iterator->start; start 154 drivers/gpu/drm/drm_vm.c baddr -= dev->hose->mem_space->start; start 141 drivers/gpu/drm/drm_vma_manager.c unsigned long start, start 153 drivers/gpu/drm/drm_vma_manager.c offset = node->start; start 154 drivers/gpu/drm/drm_vma_manager.c if (start >= offset) { start 157 drivers/gpu/drm/drm_vma_manager.c if (start == offset) start 166 drivers/gpu/drm/drm_vma_manager.c offset = best->start + best->size; start 167 drivers/gpu/drm/drm_vma_manager.c if (offset < start + pages) start 120 drivers/gpu/drm/drm_vram_mm_helper.c mem->bus.offset = mem->start << PAGE_SHIFT; start 19 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c u32 *start; start 26 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c #define ST(start, num) { (start) >> 2, (num) } start 112 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c buf_offset = (ptr - state->start + start 130 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c buf_offset = (ptr - state->start + num) * 4; start 159 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c state.start = stream; start 190 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c __func__, op, buf - state.start); start 201 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c __func__, buf - state.start, size); start 22 drivers/gpu/drm/etnaviv/etnaviv_dump.c void *start; start 74 drivers/gpu/drm/etnaviv/etnaviv_dump.c hdr->file_offset = cpu_to_le32(iter->data - iter->start); start 157 drivers/gpu/drm/etnaviv/etnaviv_dump.c iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, start 159 drivers/gpu/drm/etnaviv/etnaviv_dump.c if (!iter.start) { start 166 drivers/gpu/drm/etnaviv/etnaviv_dump.c iter.hdr = iter.start; start 169 drivers/gpu/drm/etnaviv/etnaviv_dump.c memset(iter.hdr, 0, iter.data - iter.start); start 228 drivers/gpu/drm/etnaviv/etnaviv_dump.c dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); start 134 drivers/gpu/drm/etnaviv/etnaviv_mmu.c etnaviv_iommu_unmap(context, mapping->vram_node.start, start 267 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->iova = node->start; start 268 drivers/gpu/drm/etnaviv/etnaviv_mmu.c ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size, start 387 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->iova = node->start; start 388 drivers/gpu/drm/etnaviv/etnaviv_mmu.c ret = etnaviv_context_map(context, node->start, paddr, size, start 420 drivers/gpu/drm/etnaviv/etnaviv_mmu.c etnaviv_context_unmap(context, node->start, node->size); start 733 drivers/gpu/drm/exynos/exynos7_drm_decon.c ret = devm_request_irq(dev, res->start, decon_irq_handler, start 1107 drivers/gpu/drm/exynos/exynos_drm_dsi.c bool start = false; start 1138 drivers/gpu/drm/exynos/exynos_drm_dsi.c start = !list_empty(&dsi->transfer_list); start 1142 drivers/gpu/drm/exynos/exynos_drm_dsi.c if (start) start 1150 drivers/gpu/drm/exynos/exynos_drm_dsi.c bool start = true; start 1181 drivers/gpu/drm/exynos/exynos_drm_dsi.c start = !list_empty(&dsi->transfer_list); start 1189 
drivers/gpu/drm/exynos/exynos_drm_dsi.c return start; start 1196 drivers/gpu/drm/exynos/exynos_drm_dsi.c bool start; start 1204 drivers/gpu/drm/exynos/exynos_drm_dsi.c start = !list_empty(&dsi->transfer_list); start 1206 drivers/gpu/drm/exynos/exynos_drm_dsi.c if (start) start 1336 drivers/gpu/drm/exynos/exynos_drm_fimc.c ret = devm_request_irq(dev, res->start, fimc_irq_handler, start 1202 drivers/gpu/drm/exynos/exynos_drm_fimd.c ret = devm_request_irq(dev, res->start, fimd_irq_handler, start 428 drivers/gpu/drm/exynos/exynos_drm_g2d.c unsigned long start, end; start 473 drivers/gpu/drm/exynos/exynos_drm_g2d.c start = userptr & PAGE_MASK; start 476 drivers/gpu/drm/exynos/exynos_drm_g2d.c npages = (end - start) >> PAGE_SHIFT; start 483 drivers/gpu/drm/exynos/exynos_drm_g2d.c ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, start 1282 drivers/gpu/drm/exynos/exynos_drm_gsc.c ctx->irq = res->start; start 397 drivers/gpu/drm/exynos/exynos_drm_mic.c mic->reg = devm_ioremap(dev, res.start, resource_size(&res)); start 40 drivers/gpu/drm/exynos/exynos_drm_plane.c static int exynos_plane_get_size(int start, unsigned length, unsigned last) start 42 drivers/gpu/drm/exynos/exynos_drm_plane.c int end = start + length; start 45 drivers/gpu/drm/exynos/exynos_drm_plane.c if (start <= 0) { start 48 drivers/gpu/drm/exynos/exynos_drm_plane.c } else if (start <= last) { start 49 drivers/gpu/drm/exynos/exynos_drm_plane.c size = min_t(unsigned, last - start, length); start 1121 drivers/gpu/drm/exynos/exynos_hdmi.c static void hdmi_start(struct hdmi_context *hdata, bool start) start 1124 drivers/gpu/drm/exynos/exynos_hdmi.c u32 val = start ? HDMI_TG_EN : 0; start 805 drivers/gpu/drm/exynos/exynos_mixer.c mixer_ctx->mixer_regs = devm_ioremap(dev, res->start, start 818 drivers/gpu/drm/exynos/exynos_mixer.c ret = devm_request_irq(dev, res->start, mixer_irq_handler, start 824 drivers/gpu/drm/exynos/exynos_mixer.c mixer_ctx->irq = res->start; start 863 drivers/gpu/drm/exynos/exynos_mixer.c mixer_ctx->vp_regs = devm_ioremap(dev, res->start, start 60 drivers/gpu/drm/gma500/gma_display.c unsigned long start, offset; start 80 drivers/gpu/drm/gma500/gma_display.c start = gtt->offset; start 110 drivers/gpu/drm/gma500/gma_display.c "Writing base %08lX %08lX %d %d\n", start, offset, x, y); start 116 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, offset + start); start 121 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->surf, start); start 60 drivers/gpu/drm/gma500/gtt.c offset = r->resource.start - dev_priv->gtt_mem->start; start 331 drivers/gpu/drm/gma500/gtt.c unsigned long start, end; start 335 drivers/gpu/drm/gma500/gtt.c start = r->start; start 336 drivers/gpu/drm/gma500/gtt.c end = r->start + dev_priv->gtt.stolen_size - 1; start 339 drivers/gpu/drm/gma500/gtt.c start = r->start + dev_priv->gtt.stolen_size; start 353 drivers/gpu/drm/gma500/gtt.c len, start, end, align, NULL, NULL); start 355 drivers/gpu/drm/gma500/gtt.c gt->offset = gt->resource.start - r->start; start 475 drivers/gpu/drm/gma500/gtt.c fudge.start = 0x40000000; start 569 drivers/gpu/drm/gma500/gtt.c size += range->resource.end - range->resource.start; start 44 drivers/gpu/drm/gma500/mdfld_dsi_output.h #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) start 45 drivers/gpu/drm/gma500/mdfld_dsi_output.h #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) start 46 drivers/gpu/drm/gma500/mdfld_dsi_output.h #define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) 
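The FLD_MASK/FLD_VAL/FLD_GET helpers collected here (gma500's mdfld_dsi_output.h, duplicated in tc358764.c and tc35876x-dsi-lvds.c) treat start/end as the high and low bit positions of a register field. A minimal standalone sketch follows, with the macro bodies copied from the listing; the register value and the 7:4 field in main() are purely hypothetical and only illustrate the arithmetic.

#include <stdio.h>
#include <stdint.h>

/* Macro bodies copied from the mdfld_dsi_output.h entries in this listing. */
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	uint32_t reg = 0x12a5;	/* hypothetical register contents */

	/* field occupying bits 7..4 of the register (hypothetical choice) */
	printf("FLD_MASK(7, 4)          = 0x%02x\n", FLD_MASK(7, 4));          /* 0xf0 */
	printf("FLD_GET(reg, 7, 4)      = 0x%02x\n", FLD_GET(reg, 7, 4));      /* 0x0a */
	printf("FLD_MOD(reg, 0x3, 7, 4) = 0x%04x\n", FLD_MOD(reg, 0x3, 7, 4)); /* 0x1235 */
	return 0;
}

The REG_FLD_MOD and REG_FLD_WAIT entries that follow simply wrap these macros around REG_READ/REG_WRITE to read-modify-write or poll a field on a live register.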
start 47 drivers/gpu/drm/gma500/mdfld_dsi_output.h #define FLD_MOD(orig, val, start, end) \ start 48 drivers/gpu/drm/gma500/mdfld_dsi_output.h (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) start 50 drivers/gpu/drm/gma500/mdfld_dsi_output.h #define REG_FLD_MOD(reg, val, start, end) \ start 51 drivers/gpu/drm/gma500/mdfld_dsi_output.h REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end)) start 54 drivers/gpu/drm/gma500/mdfld_dsi_output.h u32 val, int start, int end) start 58 drivers/gpu/drm/gma500/mdfld_dsi_output.h while (FLD_GET(REG_READ(reg), start, end) != val) { start 66 drivers/gpu/drm/gma500/mdfld_dsi_output.h #define REG_FLD_WAIT(reg, val, start, end) \ start 67 drivers/gpu/drm/gma500/mdfld_dsi_output.h REGISTER_FLD_WAIT(dev, reg, val, start, end) start 163 drivers/gpu/drm/gma500/mdfld_intel_display.c unsigned long start, offset; start 189 drivers/gpu/drm/gma500/mdfld_intel_display.c start = to_gtt_range(fb->obj[0])->offset; start 214 drivers/gpu/drm/gma500/mdfld_intel_display.c start, offset, x, y); start 217 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->surf, start); start 597 drivers/gpu/drm/gma500/oaktrail_crtc.c unsigned long start, offset; start 611 drivers/gpu/drm/gma500/oaktrail_crtc.c start = to_gtt_range(fb->obj[0])->offset; start 642 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE(map->surf, start); start 40 drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) start 41 drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) start 625 drivers/gpu/drm/i810/i810_dma.c unsigned int start = y * pitch + x * cpp; start 637 drivers/gpu/drm/i810/i810_dma.c OUT_RING(start); start 648 drivers/gpu/drm/i810/i810_dma.c OUT_RING(dev_priv->back_offset + start); start 659 drivers/gpu/drm/i810/i810_dma.c OUT_RING(dev_priv->depth_offset + start); start 689 drivers/gpu/drm/i810/i810_dma.c unsigned int start = dst; start 701 drivers/gpu/drm/i810/i810_dma.c OUT_RING(dev_priv->front_offset + start); start 703 drivers/gpu/drm/i810/i810_dma.c OUT_RING(dev_priv->back_offset + start); start 706 drivers/gpu/drm/i810/i810_dma.c OUT_RING(dev_priv->back_offset + start); start 708 drivers/gpu/drm/i810/i810_dma.c OUT_RING(dev_priv->front_offset + start); start 722 drivers/gpu/drm/i810/i810_dma.c unsigned long start = address - dev->agp->base; start 766 drivers/gpu/drm/i810/i810_dma.c OUT_RING(start | BB1_PROTECTED); start 767 drivers/gpu/drm/i810/i810_dma.c OUT_RING(start + used - 4); start 1041 drivers/gpu/drm/i810/i810_dma.c unsigned long start = address - dev->agp->base; start 1060 drivers/gpu/drm/i810/i810_dma.c DRM_DEBUG("start : %lx\n", start); start 1062 drivers/gpu/drm/i810/i810_dma.c DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); start 1074 drivers/gpu/drm/i810/i810_dma.c OUT_RING(start | BB1_PROTECTED); start 1075 drivers/gpu/drm/i810/i810_dma.c OUT_RING(start + used - 4); start 840 drivers/gpu/drm/i915/display/intel_bios.c const struct bdb_mipi_config *start; start 861 drivers/gpu/drm/i915/display/intel_bios.c start = find_section(bdb, BDB_MIPI_CONFIG); start 862 drivers/gpu/drm/i915/display/intel_bios.c if (!start) { start 874 drivers/gpu/drm/i915/display/intel_bios.c config = &start->config[panel_type]; start 875 drivers/gpu/drm/i915/display/intel_bios.c pps = &start->pps[panel_type]; start 12954 drivers/gpu/drm/i915/display/intel_display.c sw_ddb_entry->start, sw_ddb_entry->end, start 12955 
drivers/gpu/drm/i915/display/intel_display.c hw_ddb_entry->start, hw_ddb_entry->end); start 13006 drivers/gpu/drm/i915/display/intel_display.c sw_ddb_entry->start, sw_ddb_entry->end, start 13007 drivers/gpu/drm/i915/display/intel_display.c hw_ddb_entry->start, hw_ddb_entry->end); start 513 drivers/gpu/drm/i915/display/intel_fbc.c I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); start 515 drivers/gpu/drm/i915/display/intel_fbc.c I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); start 528 drivers/gpu/drm/i915/display/intel_fbc.c GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start, start 529 drivers/gpu/drm/i915/display/intel_fbc.c fbc->compressed_fb.start, start 531 drivers/gpu/drm/i915/display/intel_fbc.c GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start, start 532 drivers/gpu/drm/i915/display/intel_fbc.c fbc->compressed_llb->start, start 535 drivers/gpu/drm/i915/display/intel_fbc.c dev_priv->dsm.start + fbc->compressed_fb.start); start 537 drivers/gpu/drm/i915/display/intel_fbc.c dev_priv->dsm.start + compressed_llb->start); start 235 drivers/gpu/drm/i915/display/intel_fbdev.c info->apertures->ranges[0].base = ggtt->gmadr.start; start 240 drivers/gpu/drm/i915/display/intel_fbdev.c (unsigned long)(ggtt->gmadr.start + vma->node.start); start 1251 drivers/gpu/drm/i915/display/intel_hdmi.c u8 start = offset & 0xff; start 1257 drivers/gpu/drm/i915/display/intel_hdmi.c .buf = &start, start 156 drivers/gpu/drm/i915/display/intel_hotplug.c unsigned long start = hpd->stats[pin].last_jiffies; start 157 drivers/gpu/drm/i915/display/intel_hotplug.c unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); start 166 drivers/gpu/drm/i915/display/intel_hotplug.c if (!time_in_range(jiffies, start, end)) { start 97 drivers/gpu/drm/i915/display/intel_lpe_audio.c rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq; start 101 drivers/gpu/drm/i915/display/intel_lpe_audio.c rsc[1].start = pci_resource_start(dev->pdev, 0) + start 250 drivers/gpu/drm/i915/display/intel_lspcon.c unsigned long start = jiffies; start 255 drivers/gpu/drm/i915/display/intel_lspcon.c jiffies_to_msecs(jiffies - start)); start 259 drivers/gpu/drm/i915/display/intel_lspcon.c if (time_after(jiffies, start + msecs_to_jiffies(1000))) start 219 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c batch->node.start, batch->node.size, start 358 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment)) start 362 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma->node.start != entry->offset) start 366 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma->node.start < BATCH_OFFSET_BIAS) start 370 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (vma->node.start + vma->node.size - 1) >> 32) start 389 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c pin_flags = vma->node.start; start 547 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (entry->offset != vma->node.start) { start 548 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c entry->offset = vma->node.start | UPDATE; start 615 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (entry->offset != vma->node.start) { start 616 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c entry->offset = vma->node.start | UPDATE; start 894 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return gen8_canonical_addr((int)reloc->delta + target->node.start); start 973 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.start, start 1058 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.start = vma->node.start; start 1063 
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c offset = cache->node.start; start 1191 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c batch->node.start, PAGE_SIZE, start 1288 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c addr = gen8_canonical_addr(vma->node.start + offset); start 1345 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return target->node.start | UPDATE; start 1408 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c gen8_canonical_addr(target->node.start) == reloc->presumed_offset) start 2006 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c batch_start = gen8_canonical_addr(eb->batch->node.start) + start 2009 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c shadow_batch_start = gen8_canonical_addr(vma->node.start); start 2098 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch->node.start + start 301 drivers/gpu/drm/i915/gem/i915_gem_mman.c (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, start 48 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c offset = vma->node.start; start 184 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c batch->node.start, batch->node.size, start 232 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c src_offset = src->node.start; start 233 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c dst_offset = dst->node.start; start 381 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c batch->node.start, batch->node.size, start 30 drivers/gpu/drm/i915/gem/i915_gem_stolen.c unsigned alignment, u64 start, u64 end) start 38 drivers/gpu/drm/i915/gem/i915_gem_stolen.c if (INTEL_GEN(dev_priv) >= 8 && start < 4096) start 39 drivers/gpu/drm/i915/gem/i915_gem_stolen.c start = 4096; start 44 drivers/gpu/drm/i915/gem/i915_gem_stolen.c start, end, DRM_MM_INSERT_BEST); start 72 drivers/gpu/drm/i915/gem/i915_gem_stolen.c if (dsm->start == 0 || dsm->end <= dsm->start) start 98 drivers/gpu/drm/i915/gem/i915_gem_stolen.c if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) start 99 drivers/gpu/drm/i915/gem/i915_gem_stolen.c stolen[0].end = ggtt_res.start; start 100 drivers/gpu/drm/i915/gem/i915_gem_stolen.c if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) start 101 drivers/gpu/drm/i915/gem/i915_gem_stolen.c stolen[1].start = ggtt_res.end; start 109 drivers/gpu/drm/i915/gem/i915_gem_stolen.c if (stolen[0].start != stolen[1].start || start 122 drivers/gpu/drm/i915/gem/i915_gem_stolen.c r = devm_request_mem_region(dev_priv->drm.dev, dsm->start, start 135 drivers/gpu/drm/i915/gem/i915_gem_stolen.c r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1, start 387 drivers/gpu/drm/i915/gem/i915_gem_stolen.c GEM_BUG_ON(dev_priv->dsm.start == 0); start 388 drivers/gpu/drm/i915/gem/i915_gem_stolen.c GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start); start 501 drivers/gpu/drm/i915/gem/i915_gem_stolen.c sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset; start 511 drivers/gpu/drm/i915/gem/i915_gem_stolen.c obj->stolen->start, start 639 drivers/gpu/drm/i915/gem/i915_gem_stolen.c stolen->start = stolen_offset; start 20 drivers/gpu/drm/i915/gem/i915_gem_stolen.h unsigned alignment, u64 start, start 173 drivers/gpu/drm/i915/gem/i915_gem_tiling.c if (!IS_ALIGNED(vma->node.start, alignment)) start 106 drivers/gpu/drm/i915/gem/i915_gem_userptr.c it = interval_tree_iter_first(&mn->objects, range->start, end); start 127 drivers/gpu/drm/i915/gem/i915_gem_userptr.c it = interval_tree_iter_next(it, range->start, end); start 167 drivers/gpu/drm/i915/gem/i915_gem_userptr.c it = interval_tree_iter_first(&mn->objects, range->start, end); start 277 
drivers/gpu/drm/i915/gem/i915_gem_userptr.c mo->it.start = obj->userptr.ptr; start 234 drivers/gpu/drm/i915/gem/i915_gem_wait.c ktime_t start; start 244 drivers/gpu/drm/i915/gem/i915_gem_wait.c start = ktime_get(); start 253 drivers/gpu/drm/i915/gem/i915_gem_wait.c args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); start 678 drivers/gpu/drm/i915/gem/selftests/huge_pages.c if (!IS_ALIGNED(vma->node.start, start 681 drivers/gpu/drm/i915/gem/selftests/huge_pages.c vma->node.start); start 834 drivers/gpu/drm/i915/gem/selftests/huge_pages.c if (!IS_ALIGNED(vma->node.start, start 837 drivers/gpu/drm/i915/gem/selftests/huge_pages.c vma->node.start); start 600 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c *cmd++ = lower_32_bits(vma->node.start); start 601 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c *cmd++ = upper_32_bits(vma->node.start); start 663 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c batch->node.start, batch->node.size, start 1154 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c if (!node || node->start > offset) start 1157 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c GEM_BUG_ON(offset >= node->start + node->size); start 1220 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); start 1319 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); start 435 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c resv.start = hole_start; start 63 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c offset += vma->node.start; start 136 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c batch->node.start, batch->node.size, start 456 drivers/gpu/drm/i915/gt/intel_engine.h engine->stats.start = ktime_get(); start 480 drivers/gpu/drm/i915/gt/intel_engine.h last = ktime_sub(ktime_get(), engine->stats.start); start 1329 drivers/gpu/drm/i915/gt/intel_engine_cs.c rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, start 1330 drivers/gpu/drm/i915/gt/intel_engine_cs.c rq->batch ? 
lower_32_bits(rq->batch->node.start) : ~0u); start 1466 drivers/gpu/drm/i915/gt/intel_engine_cs.c engine->stats.start = engine->stats.enabled_at; start 1486 drivers/gpu/drm/i915/gt/intel_engine_cs.c ktime_sub(ktime_get(), engine->stats.start)); start 534 drivers/gpu/drm/i915/gt/intel_engine_types.h ktime_t start; start 3316 drivers/gpu/drm/i915/gt/intel_lrc.c const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE; start 3326 drivers/gpu/drm/i915/gt/intel_lrc.c memcpy(vaddr + start, defaults + start, engine->context_size); start 96 drivers/gpu/drm/i915/gt/intel_renderstate.c u64 r = s + so->vma->node.start; start 85 drivers/gpu/drm/i915/gt/intel_workarounds.c unsigned int start = 0, end = wal->count; start 107 drivers/gpu/drm/i915/gt/intel_workarounds.c while (start < end) { start 108 drivers/gpu/drm/i915/gt/intel_workarounds.c unsigned int mid = start + (end - start) / 2; start 111 drivers/gpu/drm/i915/gt/intel_workarounds.c start = mid + 1; start 111 drivers/gpu/drm/i915/gt/selftest_hangcheck.c return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); start 202 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *batch++ = lower_32_bits(vma->node.start); start 203 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *batch++ = upper_32_bits(vma->node.start); start 216 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *batch++ = lower_32_bits(vma->node.start); start 229 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *batch++ = lower_32_bits(vma->node.start); start 241 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *batch++ = lower_32_bits(vma->node.start); start 256 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); start 1467 drivers/gpu/drm/i915/gt/selftest_lrc.c vma->node.start, start 483 drivers/gpu/drm/i915/gt/selftest_workarounds.c u64 addr = scratch->node.start; start 569 drivers/gpu/drm/i915/gt/selftest_workarounds.c batch->node.start, PAGE_SIZE, start 786 drivers/gpu/drm/i915/gt/selftest_workarounds.c u64 offset = results->node.start + sizeof(u32) * i; start 854 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = engine->emit_bb_start(rq, batch->node.start, 0, 0); start 404 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c GEM_BUG_ON(upper_32_bits(node->start)); start 405 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); start 407 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c return lower_32_bits(node->start); start 416 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c .node.start = uc_fw_ggtt_offset(uc_fw, ggtt), start 436 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c u64 start = uc_fw_ggtt_offset(uc_fw, ggtt); start 438 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); start 46 drivers/gpu/drm/i915/gvt/aperture_gm.c u64 start, end, size; start 53 drivers/gpu/drm/i915/gvt/aperture_gm.c start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); start 59 drivers/gpu/drm/i915/gvt/aperture_gm.c start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); start 69 drivers/gpu/drm/i915/gvt/aperture_gm.c start, end, flags); start 151 drivers/gpu/drm/i915/gvt/cfg_space.c u64 start, end; start 160 drivers/gpu/drm/i915/gvt/cfg_space.c start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); start 162 drivers/gpu/drm/i915/gvt/cfg_space.c start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); start 164 drivers/gpu/drm/i915/gvt/cfg_space.c start &= ~GENMASK(3, 0); start 165 drivers/gpu/drm/i915/gvt/cfg_space.c end = start + 
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; start 167 drivers/gpu/drm/i915/gvt/cfg_space.c ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap); start 377 drivers/gpu/drm/i915/gvt/cmd_parser.c #define DWORD_FIELD(dword, end, start) \ start 378 drivers/gpu/drm/i915/gvt/cmd_parser.c FIELD_GET(GENMASK(end, start), cmd_val(s, dword)) start 65 drivers/gpu/drm/i915/gvt/dmabuf.c (fb_info->start >> PAGE_SHIFT); start 221 drivers/gpu/drm/i915/gvt/dmabuf.c info->start = p.base; start 251 drivers/gpu/drm/i915/gvt/dmabuf.c info->start = c.base; start 279 drivers/gpu/drm/i915/gvt/dmabuf.c if (info->start & (PAGE_SIZE - 1)) { start 280 drivers/gpu/drm/i915/gvt/dmabuf.c gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start); start 284 drivers/gpu/drm/i915/gvt/dmabuf.c if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) { start 309 drivers/gpu/drm/i915/gvt/dmabuf.c if ((fb_info->start == latest_info->start) && start 36 drivers/gpu/drm/i915/gvt/dmabuf.h __u64 start; start 381 drivers/gpu/drm/i915/gvt/gvt.h #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start) start 400 drivers/gpu/drm/i915/gvt/gvt.h #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) start 401 drivers/gpu/drm/i915/gvt/gvt.h #define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start) start 102 drivers/gpu/drm/i915/gvt/handlers.c u32 start, end, i; start 110 drivers/gpu/drm/i915/gvt/handlers.c start = offset; start 113 drivers/gpu/drm/i915/gvt/handlers.c for (i = start; i < end; i += 4) { start 621 drivers/gpu/drm/i915/gvt/handlers.c static unsigned int calc_index(unsigned int offset, unsigned int start, start 624 drivers/gpu/drm/i915/gvt/handlers.c unsigned int range = next - start; start 628 drivers/gpu/drm/i915/gvt/handlers.c if (offset < start || offset > end) start 630 drivers/gpu/drm/i915/gvt/handlers.c offset -= start; start 67 drivers/gpu/drm/i915/gvt/hypercall.h int (*set_trap_area)(unsigned long handle, u64 start, u64 end, start 1192 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned int index, unsigned int start, start 1200 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned int index, unsigned int start, start 1207 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned int index, unsigned int start, unsigned int count, start 1214 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned int index, unsigned int start, unsigned int count, start 1235 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned int index, unsigned int start, unsigned int count, start 1239 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned int start, unsigned int count, u32 flags, start 1272 drivers/gpu/drm/i915/gvt/kvmgt.c return func(vgpu, index, start, count, flags, data); start 1514 drivers/gpu/drm/i915/gvt/kvmgt.c hdr.start, hdr.count, data); start 292 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, u64 start, u64 end, bool map) start 298 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); start 1485 drivers/gpu/drm/i915/gvt/scheduler.c u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; start 1527 drivers/gpu/drm/i915/gvt/scheduler.c RING_CTX_OFF(rb_start.val), &start, 4); start 1533 drivers/gpu/drm/i915/gvt/scheduler.c if (!intel_gvt_ggtt_validate_range(vgpu, start, start 1535 drivers/gpu/drm/i915/gvt/scheduler.c gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start); start 1549 drivers/gpu/drm/i915/gvt/scheduler.c workload->rb_start = start; start 1591 drivers/gpu/drm/i915/gvt/scheduler.c workload, ring_id, head, tail, start, ctl); start 338 
drivers/gpu/drm/i915/i915_buddy.c u64 start, u64 size) start 351 drivers/gpu/drm/i915/i915_buddy.c if (!IS_ALIGNED(size | start, mm->chunk_size)) start 354 drivers/gpu/drm/i915/i915_buddy.c if (range_overflows(start, size, mm->size)) start 360 drivers/gpu/drm/i915/i915_buddy.c end = start + size - 1; start 377 drivers/gpu/drm/i915/i915_buddy.c if (!overlaps(start, end, block_start, block_end)) start 385 drivers/gpu/drm/i915/i915_buddy.c if (contains(start, end, block_start, block_end)) { start 122 drivers/gpu/drm/i915/i915_buddy.h u64 start, u64 size); start 1100 drivers/gpu/drm/i915/i915_cmd_parser.c int start = 0, end = count; start 1101 drivers/gpu/drm/i915/i915_cmd_parser.c while (start < end) { start 1102 drivers/gpu/drm/i915/i915_cmd_parser.c int mid = start + (end - start) / 2; start 1107 drivers/gpu/drm/i915/i915_cmd_parser.c start = mid + 1; start 170 drivers/gpu/drm/i915/i915_debugfs.c vma->node.start, vma->node.size, start 223 drivers/gpu/drm/i915/i915_debugfs.c seq_printf(m, " (stolen: %08llx)", obj->stolen->start); start 2978 drivers/gpu/drm/i915/i915_debugfs.c entry->start, entry->end, start 2983 drivers/gpu/drm/i915/i915_debugfs.c seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, start 190 drivers/gpu/drm/i915/i915_drv.c dev_priv->mch_res.start = 0; start 196 drivers/gpu/drm/i915/i915_drv.c upper_32_bits(dev_priv->mch_res.start)); start 199 drivers/gpu/drm/i915/i915_drv.c lower_32_bits(dev_priv->mch_res.start)); start 268 drivers/gpu/drm/i915/i915_drv.c if (dev_priv->mch_res.start) start 434 drivers/gpu/drm/i915/i915_drv.c ap->ranges[0].base = ggtt->gmadr.start; start 878 drivers/gpu/drm/i915/i915_drv.h u16 start, end; /* in number of blocks, 'end' is exclusive */ start 883 drivers/gpu/drm/i915/i915_drv.h return entry->end - entry->start; start 889 drivers/gpu/drm/i915/i915_drv.h if (e1->start == e2->start && e1->end == e2->end) start 2383 drivers/gpu/drm/i915/i915_drv.h u64 start, u64 end, start 353 drivers/gpu/drm/i915/i915_gem.c node.start = i915_ggtt_offset(vma); start 392 drivers/gpu/drm/i915/i915_gem.c u32 page_base = node.start; start 399 drivers/gpu/drm/i915/i915_gem.c node.start, I915_CACHE_NONE, 0); start 419 drivers/gpu/drm/i915/i915_gem.c ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); start 563 drivers/gpu/drm/i915/i915_gem.c node.start = i915_ggtt_offset(vma); start 603 drivers/gpu/drm/i915/i915_gem.c u32 page_base = node.start; start 612 drivers/gpu/drm/i915/i915_gem.c node.start, I915_CACHE_NONE, 0); start 640 drivers/gpu/drm/i915/i915_gem.c ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); start 95 drivers/gpu/drm/i915/i915_gem_evict.c u64 start, u64 end, start 128 drivers/gpu/drm/i915/i915_gem_evict.c start, end, mode); start 266 drivers/gpu/drm/i915/i915_gem_evict.c u64 start = target->start; start 267 drivers/gpu/drm/i915/i915_gem_evict.c u64 end = start + target->size; start 273 drivers/gpu/drm/i915/i915_gem_evict.c GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); start 289 drivers/gpu/drm/i915/i915_gem_evict.c if (start) start 290 drivers/gpu/drm/i915/i915_gem_evict.c start -= I915_GTT_PAGE_SIZE; start 295 drivers/gpu/drm/i915/i915_gem_evict.c GEM_BUG_ON(start >= end); start 297 drivers/gpu/drm/i915/i915_gem_evict.c drm_mm_for_each_node_in_range(node, &vm->mm, start, end) { start 314 drivers/gpu/drm/i915/i915_gem_evict.c if (node->start + node->size == target->start) { start 318 drivers/gpu/drm/i915/i915_gem_evict.c if (node->start == target->start + target->size) { start 85 drivers/gpu/drm/i915/i915_gem_fence_reg.c 
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE)); start 89 drivers/gpu/drm/i915/i915_gem_fence_reg.c val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32; start 90 drivers/gpu/drm/i915/i915_gem_fence_reg.c val |= vma->node.start; start 131 drivers/gpu/drm/i915/i915_gem_fence_reg.c GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK); start 133 drivers/gpu/drm/i915/i915_gem_fence_reg.c GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size)); start 141 drivers/gpu/drm/i915/i915_gem_fence_reg.c val = vma->node.start; start 169 drivers/gpu/drm/i915/i915_gem_fence_reg.c GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK); start 172 drivers/gpu/drm/i915/i915_gem_fence_reg.c GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size)); start 174 drivers/gpu/drm/i915/i915_gem_fence_reg.c val = vma->node.start; start 154 drivers/gpu/drm/i915/i915_gem_gtt.c vma->node.start, vma->size); start 171 drivers/gpu/drm/i915/i915_gem_gtt.c vma->vm->clear_range(vma->vm, vma->node.start, vma->size); start 879 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) start 884 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(start >= end); start 887 drivers/gpu/drm/i915/i915_gem_gtt.c *idx = i915_pde_index(start, shift); start 888 drivers/gpu/drm/i915/i915_gem_gtt.c if ((start ^ end) & mask) start 894 drivers/gpu/drm/i915/i915_gem_gtt.c static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) start 898 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(start >= end); start 899 drivers/gpu/drm/i915/i915_gem_gtt.c return (start ^ end) & mask && (start & ~mask) == 0; start 902 drivers/gpu/drm/i915/i915_gem_gtt.c static inline unsigned int gen8_pt_count(u64 start, u64 end) start 904 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(start >= end); start 905 drivers/gpu/drm/i915/i915_gem_gtt.c if ((start ^ end) >> gen8_pd_shift(1)) start 906 drivers/gpu/drm/i915/i915_gem_gtt.c return GEN8_PDES - (start & (GEN8_PDES - 1)); start 908 drivers/gpu/drm/i915/i915_gem_gtt.c return end - start; start 965 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, const u64 end, int lvl) start 972 drivers/gpu/drm/i915/i915_gem_gtt.c len = gen8_pd_range(start, end, lvl--, &idx); start 974 drivers/gpu/drm/i915/i915_gem_gtt.c __func__, vm, lvl + 1, start, end, start 982 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pd_contains(start, end, lvl)) { start 984 drivers/gpu/drm/i915/i915_gem_gtt.c __func__, vm, lvl + 1, idx, start, end); start 987 drivers/gpu/drm/i915/i915_gem_gtt.c start += (u64)I915_PDES << gen8_pd_shift(lvl); start 992 drivers/gpu/drm/i915/i915_gem_gtt.c start = __gen8_ppgtt_clear(vm, as_pd(pt), start 993 drivers/gpu/drm/i915/i915_gem_gtt.c start, end, lvl); start 998 drivers/gpu/drm/i915/i915_gem_gtt.c count = gen8_pt_count(start, end); start 1000 drivers/gpu/drm/i915/i915_gem_gtt.c __func__, vm, lvl, start, end, start 1001 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pd_index(start, 0), count, start 1006 drivers/gpu/drm/i915/i915_gem_gtt.c memset64(vaddr + gen8_pd_index(start, 0), start 1012 drivers/gpu/drm/i915/i915_gem_gtt.c start += count; start 1019 drivers/gpu/drm/i915/i915_gem_gtt.c return start; start 1023 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 1025 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); start 1027 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(range_overflows(start, length, vm->total)); start 1029 drivers/gpu/drm/i915/i915_gem_gtt.c start >>= GEN8_PTE_SHIFT; start 1034 
drivers/gpu/drm/i915/i915_gem_gtt.c start, start + length, vm->top); start 1039 drivers/gpu/drm/i915/i915_gem_gtt.c u64 * const start, const u64 end, int lvl) start 1048 drivers/gpu/drm/i915/i915_gem_gtt.c len = gen8_pd_range(*start, end, lvl--, &idx); start 1050 drivers/gpu/drm/i915/i915_gem_gtt.c __func__, vm, lvl + 1, *start, end, start 1086 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pt_count(*start, end) < I915_PDES) start 1102 drivers/gpu/drm/i915/i915_gem_gtt.c start, end, lvl); start 1113 drivers/gpu/drm/i915/i915_gem_gtt.c unsigned int count = gen8_pt_count(*start, end); start 1116 drivers/gpu/drm/i915/i915_gem_gtt.c __func__, vm, lvl, *start, end, start 1117 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pd_index(*start, 0), count, start 1123 drivers/gpu/drm/i915/i915_gem_gtt.c *start += count; start 1134 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 1139 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); start 1141 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(range_overflows(start, length, vm->total)); start 1143 drivers/gpu/drm/i915/i915_gem_gtt.c start >>= GEN8_PTE_SHIFT; start 1146 drivers/gpu/drm/i915/i915_gem_gtt.c from = start; start 1149 drivers/gpu/drm/i915/i915_gem_gtt.c &start, start + length, vm->top); start 1150 drivers/gpu/drm/i915/i915_gem_gtt.c if (unlikely(err && from != start)) start 1152 drivers/gpu/drm/i915/i915_gem_gtt.c from, start, vm->top); start 1220 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start = vma->node.start; start 1227 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pdp_for_page_address(vma->vm, start); start 1229 drivers/gpu/drm/i915/i915_gem_gtt.c i915_pd_entry(pdp, __gen8_pte_index(start, 2)); start 1239 drivers/gpu/drm/i915/i915_gem_gtt.c !__gen8_pte_index(start, 0)) { start 1240 drivers/gpu/drm/i915/i915_gem_gtt.c index = __gen8_pte_index(start, 1); start 1247 drivers/gpu/drm/i915/i915_gem_gtt.c i915_pt_entry(pd, __gen8_pte_index(start, 1)); start 1249 drivers/gpu/drm/i915/i915_gem_gtt.c index = __gen8_pte_index(start, 0); start 1257 drivers/gpu/drm/i915/i915_gem_gtt.c maybe_64K = __gen8_pte_index(start, 1); start 1266 drivers/gpu/drm/i915/i915_gem_gtt.c start += page_size; start 1300 drivers/gpu/drm/i915/i915_gem_gtt.c !iter->sg && IS_ALIGNED(vma->node.start + start 1345 drivers/gpu/drm/i915/i915_gem_gtt.c u64 idx = vma->node.start >> GEN8_PTE_SHIFT; start 1605 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 1608 drivers/gpu/drm/i915/i915_gem_gtt.c const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; start 1650 drivers/gpu/drm/i915/i915_gem_gtt.c unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; start 1686 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 1692 drivers/gpu/drm/i915/i915_gem_gtt.c u64 from = start; start 1700 drivers/gpu/drm/i915/i915_gem_gtt.c gen6_for_each_pde(pt, pd, start, length, pde) { start 1701 drivers/gpu/drm/i915/i915_gem_gtt.c const unsigned int count = gen6_pte_count(start, length); start 1742 drivers/gpu/drm/i915/i915_gem_gtt.c gen6_ppgtt_clear_range(vm, from, start - from); start 2206 drivers/gpu/drm/i915/i915_gem_gtt.c gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; start 2245 drivers/gpu/drm/i915/i915_gem_gtt.c unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; start 2259 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 2264 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 2267 drivers/gpu/drm/i915/i915_gem_gtt.c unsigned first_entry = start / I915_GTT_PAGE_SIZE; start 2355 
drivers/gpu/drm/i915/i915_gem_gtt.c u64 start; start 2363 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); start 2370 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, start 2373 drivers/gpu/drm/i915/i915_gem_gtt.c struct clear_range arg = { vm, start, length }; start 2379 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 2382 drivers/gpu/drm/i915/i915_gem_gtt.c unsigned first_entry = start / I915_GTT_PAGE_SIZE; start 2419 drivers/gpu/drm/i915/i915_gem_gtt.c intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, start 2424 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 length) start 2426 drivers/gpu/drm/i915/i915_gem_gtt.c intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); start 2464 drivers/gpu/drm/i915/i915_gem_gtt.c vma->vm->clear_range(vma->vm, vma->node.start, vma->size); start 2485 drivers/gpu/drm/i915/i915_gem_gtt.c vma->node.start, start 2516 drivers/gpu/drm/i915/i915_gem_gtt.c vm->clear_range(vm, vma->node.start, vma->size); start 2523 drivers/gpu/drm/i915/i915_gem_gtt.c vm->clear_range(vm, vma->node.start, vma->size); start 2562 drivers/gpu/drm/i915/i915_gem_gtt.c u64 *start, start 2566 drivers/gpu/drm/i915/i915_gem_gtt.c *start += I915_GTT_PAGE_SIZE; start 3220 drivers/gpu/drm/i915/i915_gem_gtt.c ggtt->gmadr.start, start 3227 drivers/gpu/drm/i915/i915_gem_gtt.c ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); start 3646 drivers/gpu/drm/i915/i915_gem_gtt.c node->start = offset; start 3663 drivers/gpu/drm/i915/i915_gem_gtt.c static u64 random_offset(u64 start, u64 end, u64 len, u64 align) start 3667 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(range_overflows(start, len, end)); start 3668 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(round_up(start, align) > round_down(end - len, align)); start 3670 drivers/gpu/drm/i915/i915_gem_gtt.c range = round_down(end - len, align) - round_up(start, align); start 3682 drivers/gpu/drm/i915/i915_gem_gtt.c start += addr; start 3685 drivers/gpu/drm/i915/i915_gem_gtt.c return round_up(start, align); start 3725 drivers/gpu/drm/i915/i915_gem_gtt.c u64 start, u64 end, unsigned int flags) start 3736 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(start >= end); start 3737 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); start 3742 drivers/gpu/drm/i915/i915_gem_gtt.c if (unlikely(range_overflows(start, size, end))) start 3745 drivers/gpu/drm/i915/i915_gem_gtt.c if (unlikely(round_up(start, alignment) > round_down(end - size, alignment))) start 3766 drivers/gpu/drm/i915/i915_gem_gtt.c start, end, mode); start 3773 drivers/gpu/drm/i915/i915_gem_gtt.c start, end, start 3805 drivers/gpu/drm/i915/i915_gem_gtt.c offset = random_offset(start, end, start 3816 drivers/gpu/drm/i915/i915_gem_gtt.c start, end, flags); start 3822 drivers/gpu/drm/i915/i915_gem_gtt.c start, end, DRM_MM_INSERT_EVICT); start 345 drivers/gpu/drm/i915/i915_gem_gtt.h u64 start, u64 length); start 347 drivers/gpu/drm/i915/i915_gem_gtt.h u64 start, u64 length); start 455 drivers/gpu/drm/i915/i915_gem_gtt.h #define gen6_for_each_pde(pt, pd, start, length, iter) \ start 456 drivers/gpu/drm/i915/i915_gem_gtt.h for (iter = gen6_pde_index(start); \ start 459 drivers/gpu/drm/i915/i915_gem_gtt.h ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ start 460 drivers/gpu/drm/i915/i915_gem_gtt.h temp = min(temp - start, length); \ start 461 drivers/gpu/drm/i915/i915_gem_gtt.h start += temp, length -= temp; }), ++iter) start 600 
drivers/gpu/drm/i915/i915_gem_gtt.h u64 start, u64 end, unsigned int flags); start 466 drivers/gpu/drm/i915/i915_gpu_error.c erq->start, erq->head, erq->tail); start 486 drivers/gpu/drm/i915/i915_gpu_error.c err_printf(m, " START: 0x%08x\n", ee->start); start 501 drivers/gpu/drm/i915/i915_gpu_error.c u64 start = ee->batchbuffer->gtt_offset; start 502 drivers/gpu/drm/i915/i915_gpu_error.c u64 end = start + ee->batchbuffer->gtt_size; start 505 drivers/gpu/drm/i915/i915_gpu_error.c upper_32_bits(start), lower_32_bits(start), start 852 drivers/gpu/drm/i915/i915_gpu_error.c size_t len, start; start 865 drivers/gpu/drm/i915/i915_gpu_error.c start = sg->offset; start 869 drivers/gpu/drm/i915/i915_gpu_error.c start += off - pos; start 876 drivers/gpu/drm/i915/i915_gpu_error.c memcpy(buf, page_address(sg_page(sg)) + start, len); start 962 drivers/gpu/drm/i915/i915_gpu_error.c const u64 slot = ggtt->error_capture.start; start 985 drivers/gpu/drm/i915/i915_gpu_error.c dst->gtt_offset = vma->node.start; start 1100 drivers/gpu/drm/i915/i915_gpu_error.c ee->start = ENGINE_READ(engine, RING_START); start 1179 drivers/gpu/drm/i915/i915_gpu_error.c erq->start = i915_ggtt_offset(request->ring->vma); start 1350 drivers/gpu/drm/i915/i915_gpu_error.c .node = { .start = U64_MAX, .size = obj->base.size }, start 1667 drivers/gpu/drm/i915/i915_gpu_error.c const u64 slot = ggtt->error_capture.start; start 100 drivers/gpu/drm/i915/i915_gpu_error.h u32 start; start 148 drivers/gpu/drm/i915/i915_gpu_error.h u32 start; start 1071 drivers/gpu/drm/i915/i915_pmu.c pmu->base.start = i915_pmu_event_start; start 469 drivers/gpu/drm/i915/i915_trace.h __entry->offset = vma->node.start; start 494 drivers/gpu/drm/i915/i915_trace.h __entry->offset = vma->node.start; start 624 drivers/gpu/drm/i915/i915_trace.h __field(u64, start) start 633 drivers/gpu/drm/i915/i915_trace.h __entry->start = node->start; start 641 drivers/gpu/drm/i915/i915_trace.h __entry->start, __entry->size, start 96 drivers/gpu/drm/i915/i915_utils.h #define range_overflows(start, size, max) ({ \ start 97 drivers/gpu/drm/i915/i915_utils.h typeof(start) start__ = (start); \ start 105 drivers/gpu/drm/i915/i915_utils.h #define range_overflows_t(type, start, size, max) \ start 106 drivers/gpu/drm/i915/i915_utils.h range_overflows((type)(start), (type)(size), (type)(max)) start 127 drivers/gpu/drm/i915/i915_vgpu.c node->start, start 128 drivers/gpu/drm/i915/i915_vgpu.c node->start + node->size, start 157 drivers/gpu/drm/i915/i915_vgpu.c unsigned long start, unsigned long end) start 159 drivers/gpu/drm/i915/i915_vgpu.c unsigned long size = end - start; start 162 drivers/gpu/drm/i915/i915_vgpu.c if (start >= end) start 166 drivers/gpu/drm/i915/i915_vgpu.c start, end, size / 1024); start 168 drivers/gpu/drm/i915/i915_vgpu.c size, start, I915_COLOR_UNEVICTABLE, start 65 drivers/gpu/drm/i915/i915_vma.c vma->node.start, vma->node.size, reason); start 72 drivers/gpu/drm/i915/i915_vma.c vma->node.start, vma->node.size, reason, buf); start 314 drivers/gpu/drm/i915/i915_vma.c if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, start 367 drivers/gpu/drm/i915/i915_vma.c vma->node.start, start 445 drivers/gpu/drm/i915/i915_vma.c if (alignment && !IS_ALIGNED(vma->node.start, alignment)) start 452 drivers/gpu/drm/i915/i915_vma.c vma->node.start < (flags & PIN_OFFSET_MASK)) start 456 drivers/gpu/drm/i915/i915_vma.c vma->node.start != (flags & PIN_OFFSET_MASK)) start 470 drivers/gpu/drm/i915/i915_vma.c IS_ALIGNED(vma->node.start, vma->fence_alignment)); start 472 
drivers/gpu/drm/i915/i915_vma.c mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; start 546 drivers/gpu/drm/i915/i915_vma.c u64 start, end; start 565 drivers/gpu/drm/i915/i915_vma.c start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; start 566 drivers/gpu/drm/i915/i915_vma.c GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); start 651 drivers/gpu/drm/i915/i915_vma.c start, end, flags); start 655 drivers/gpu/drm/i915/i915_vma.c GEM_BUG_ON(vma->node.start < start); start 656 drivers/gpu/drm/i915/i915_vma.c GEM_BUG_ON(vma->node.start + vma->node.size > end); start 218 drivers/gpu/drm/i915/i915_vma.h GEM_BUG_ON(upper_32_bits(vma->node.start)); start 219 drivers/gpu/drm/i915/i915_vma.h GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1)); start 220 drivers/gpu/drm/i915/i915_vma.h return lower_32_bits(vma->node.start); start 3876 drivers/gpu/drm/i915/intel_pm.c alloc->start = 0; start 3930 drivers/gpu/drm/i915/intel_pm.c alloc->start = ddb_size * width_before_pipe / total_width; start 3977 drivers/gpu/drm/i915/intel_pm.c entry->start = reg & DDB_ENTRY_MASK; start 4342 drivers/gpu/drm/i915/intel_pm.c u16 alloc_size, start = 0; start 4361 drivers/gpu/drm/i915/intel_pm.c alloc->start = alloc->end = 0; start 4385 drivers/gpu/drm/i915/intel_pm.c crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start = start 4471 drivers/gpu/drm/i915/intel_pm.c start = alloc->start; start 4486 drivers/gpu/drm/i915/intel_pm.c plane_alloc->start = start; start 4487 drivers/gpu/drm/i915/intel_pm.c start += total[plane_id]; start 4488 drivers/gpu/drm/i915/intel_pm.c plane_alloc->end = start; start 4492 drivers/gpu/drm/i915/intel_pm.c uv_plane_alloc->start = start; start 4493 drivers/gpu/drm/i915/intel_pm.c start += uv_total[plane_id]; start 4494 drivers/gpu/drm/i915/intel_pm.c uv_plane_alloc->end = start; start 5126 drivers/gpu/drm/i915/intel_pm.c I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start); start 5248 drivers/gpu/drm/i915/intel_pm.c return a->start < b->end && b->start < a->end; start 5372 drivers/gpu/drm/i915/intel_pm.c old->start, old->end, new->start, new->end, start 7000 drivers/gpu/drm/i915/intel_pm.c if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) && start 7675 drivers/gpu/drm/i915/intel_pm.c WARN_ON(pctx_addr != dev_priv->dsm.start + start 7676 drivers/gpu/drm/i915/intel_pm.c dev_priv->vlv_pctx->stolen->start); start 7719 drivers/gpu/drm/i915/intel_pm.c pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start; start 7744 drivers/gpu/drm/i915/intel_pm.c dev_priv->dsm.start, start 7745 drivers/gpu/drm/i915/intel_pm.c pctx->stolen->start, start 7747 drivers/gpu/drm/i915/intel_pm.c pctx_paddr = dev_priv->dsm.start + pctx->stolen->start; start 823 drivers/gpu/drm/i915/intel_uncore.c if (offset < entry->start) start 879 drivers/gpu/drm/i915/intel_uncore.c { .start = (s), .end = (e), .domains = (d) } start 103 drivers/gpu/drm/i915/intel_uncore.h u32 start; start 40 drivers/gpu/drm/i915/selftests/i915_gem.c const u64 slot = ggtt->error_capture.start; start 46 drivers/gpu/drm/i915/selftests/i915_gem.c const dma_addr_t dma = i915->dsm.start + page; start 220 drivers/gpu/drm/i915/selftests/i915_gem_evict.c .start = 0, start 257 drivers/gpu/drm/i915/selftests/i915_gem_evict.c u64 *start, start 268 drivers/gpu/drm/i915/selftests/i915_gem_evict.c .start = I915_GTT_PAGE_SIZE * 2, start 294 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c mock_vma.node.start = addr; start 411 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, vma->node.start, 
vma->node.size, drm_mm_node_allocated(&vma->node), start 441 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, vma->node.start, vma->node.size, start 450 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, vma->node.start, vma->node.size, start 484 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), start 514 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), start 523 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, vma->node.start, vma->node.size, start 1070 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (a->start < b->start) start 1175 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 offset = tmp.start + n * PAGE_SIZE; start 1189 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 offset = tmp.start + order[n] * PAGE_SIZE; start 1200 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 offset = tmp.start + order[n] * PAGE_SIZE; start 1218 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); start 1346 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (vma->node.start != total || start 1349 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c vma->node.start, vma->node.size, start 1396 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (vma->node.start != total || start 1399 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c vma->node.start, vma->node.size, start 1440 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (vma->node.start != offset || start 1443 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c vma->node.start, vma->node.size, start 1466 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 start, end; start 1503 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c ii->start, ii->end, start 1507 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c ii->size, ii->alignment, ii->start, ii->end, start 1590 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c offset = vma->node.start; start 1610 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (vma->node.start != offset) { start 1612 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c offset, vma->node.start); start 669 drivers/gpu/drm/i915/selftests/i915_request.c batch->node.start, start 793 drivers/gpu/drm/i915/selftests/i915_request.c *cmd++ = lower_32_bits(vma->node.start); start 794 drivers/gpu/drm/i915/selftests/i915_request.c *cmd++ = upper_32_bits(vma->node.start); start 797 drivers/gpu/drm/i915/selftests/i915_request.c *cmd++ = lower_32_bits(vma->node.start); start 800 drivers/gpu/drm/i915/selftests/i915_request.c *cmd++ = lower_32_bits(vma->node.start); start 872 drivers/gpu/drm/i915/selftests/i915_request.c batch->node.start, start 990 drivers/gpu/drm/i915/selftests/i915_request.c batch->node.start, start 71 drivers/gpu/drm/i915/selftests/igt_spinner.c return hws->node.start + seqno_offset(rq->fence.context); start 143 drivers/gpu/drm/i915/selftests/igt_spinner.c *batch++ = lower_32_bits(vma->node.start); start 144 drivers/gpu/drm/i915/selftests/igt_spinner.c *batch++ = upper_32_bits(vma->node.start); start 156 drivers/gpu/drm/i915/selftests/igt_spinner.c err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); start 36 drivers/gpu/drm/i915/selftests/intel_uncore.c if (is_watertight && (prev + 1) != (s32)ranges->start) { start 38 drivers/gpu/drm/i915/selftests/intel_uncore.c __func__, i, ranges->start, ranges->end, prev); start 43 drivers/gpu/drm/i915/selftests/intel_uncore.c if (prev >= (s32)ranges->start) { start 
45 drivers/gpu/drm/i915/selftests/intel_uncore.c __func__, i, ranges->start, ranges->end, prev); start 50 drivers/gpu/drm/i915/selftests/intel_uncore.c if (ranges->start >= ranges->end) { start 52 drivers/gpu/drm/i915/selftests/intel_uncore.c __func__, i, ranges->start, ranges->end); start 34 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned long start, end; start 50 drivers/gpu/drm/i915/selftests/scatterlist.c pfn = pt->start; start 88 drivers/gpu/drm/i915/selftests/scatterlist.c pfn = pt->start; start 120 drivers/gpu/drm/i915/selftests/scatterlist.c pfn = pt->start; start 234 drivers/gpu/drm/i915/selftests/scatterlist.c pt->start = PFN_BIAS; start 235 drivers/gpu/drm/i915/selftests/scatterlist.c pfn = pt->start; start 35 drivers/gpu/drm/lima/lima_vm.c static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end) start 39 drivers/gpu/drm/lima/lima_vm.c for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) { start 48 drivers/gpu/drm/lima/lima_vm.c u32 start, u32 end) start 53 drivers/gpu/drm/lima/lima_vm.c for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) { start 66 drivers/gpu/drm/lima/lima_vm.c if (addr != start) start 67 drivers/gpu/drm/lima/lima_vm.c lima_vm_unmap_page_table(vm, start, addr - 1); start 135 drivers/gpu/drm/lima/lima_vm.c err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start, start 136 drivers/gpu/drm/lima/lima_vm.c bo_va->node.start + bo_va->node.size - 1); start 171 drivers/gpu/drm/lima/lima_vm.c lima_vm_unmap_page_table(vm, bo_va->node.start, start 172 drivers/gpu/drm/lima/lima_vm.c bo_va->node.start + bo_va->node.size - 1); start 193 drivers/gpu/drm/lima/lima_vm.c ret = bo_va->node.start; start 67 drivers/gpu/drm/mediatek/mtk_disp_color.c .start = mtk_color_start, start 222 drivers/gpu/drm/mediatek/mtk_disp_ovl.c .start = mtk_ovl_start, start 225 drivers/gpu/drm/mediatek/mtk_disp_rdma.c .start = mtk_rdma_start, start 581 drivers/gpu/drm/mediatek/mtk_dpi.c .start = mtk_dpi_start, start 170 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c .start = mtk_aal_start, start 177 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c .start = mtk_gamma_start, start 183 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c .start = mtk_od_start, start 187 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c .start = mtk_ufoe_start, start 69 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h void (*start)(struct mtk_ddp_comp *comp); start 101 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h if (comp->funcs && comp->funcs->start) start 102 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h comp->funcs->start(comp); start 868 drivers/gpu/drm/mediatek/mtk_dsi.c .start = mtk_dsi_ddp_start, start 314 drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start, start 229 drivers/gpu/drm/meson/meson_drv.c regs = devm_ioremap(dev, res->start, resource_size(res)); start 160 drivers/gpu/drm/meson/meson_overlay.c int temp, start, end; start 202 drivers/gpu/drm/meson/meson_overlay.c start = video_top + video_height / 2 - ((h_in << 17) / ratio_y); start 203 drivers/gpu/drm/meson/meson_overlay.c end = (h_in << 18) / ratio_y + start - 1; start 205 drivers/gpu/drm/meson/meson_overlay.c if (video_top < 0 && start < 0) start 206 drivers/gpu/drm/meson/meson_overlay.c vd_start_lines = (-(start) * ratio_y) >> 18; start 207 drivers/gpu/drm/meson/meson_overlay.c else if (start < video_top) start 208 drivers/gpu/drm/meson/meson_overlay.c vd_start_lines = ((video_top - start) * ratio_y) >> 18; start 233 drivers/gpu/drm/meson/meson_overlay.c start >>= 1; start 237 
drivers/gpu/drm/meson/meson_overlay.c vsc_startp = max_t(int, start, start 248 drivers/gpu/drm/meson/meson_overlay.c start = video_left + video_width / 2 - ((w_in << 17) / ratio_x); start 249 drivers/gpu/drm/meson/meson_overlay.c end = (w_in << 18) / ratio_x + start - 1; start 251 drivers/gpu/drm/meson/meson_overlay.c if (video_left < 0 && start < 0) start 252 drivers/gpu/drm/meson/meson_overlay.c hd_start_lines = (-(start) * ratio_x) >> 18; start 253 drivers/gpu/drm/meson/meson_overlay.c else if (start < video_left) start 254 drivers/gpu/drm/meson/meson_overlay.c hd_start_lines = ((video_left - start) * ratio_x) >> 18; start 271 drivers/gpu/drm/meson/meson_overlay.c hsc_startp = max_t(int, start, max_t(int, 0, video_left)); start 33 drivers/gpu/drm/meson/meson_plane.c #define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start) start 912 drivers/gpu/drm/mga/mga_dma.c dev_priv->prim.start = (u8 *) dev_priv->primary->handle; start 63 drivers/gpu/drm/mga/mga_drv.h u8 *start; start 291 drivers/gpu/drm/mga/mga_drv.h prim = dev_priv->prim.start; \ start 301 drivers/gpu/drm/mga/mga_drv.h prim = dev_priv->prim.start; \ start 600 drivers/gpu/drm/mga/mga_state.c u32 start = box->y1 * dev_priv->front_pitch; start 605 drivers/gpu/drm/mga/mga_state.c DMA_BLOCK(MGA_AR0, start + box->x2 - 1, start 606 drivers/gpu/drm/mga/mga_state.c MGA_AR3, start + box->x1, start 670 drivers/gpu/drm/mga/mga_state.c unsigned int start, unsigned int end) start 678 drivers/gpu/drm/mga/mga_state.c DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end); start 680 drivers/gpu/drm/mga/mga_state.c if (start != end) { start 695 drivers/gpu/drm/mga/mga_state.c MGA_SETUPADDRESS, address + start, start 799 drivers/gpu/drm/mga/mga_state.c int start; start 804 drivers/gpu/drm/mga/mga_state.c start = srcy * blit->src_pitch + srcx; start 806 drivers/gpu/drm/mga/mga_state.c DMA_BLOCK(MGA_AR0, start + w, start 807 drivers/gpu/drm/mga/mga_state.c MGA_AR3, start, start 935 drivers/gpu/drm/mga/mga_state.c mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); start 1200 drivers/gpu/drm/msm/adreno/a6xx_gmu.c ret = ioremap(res->start, resource_size(res)); start 352 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c cxdbg = ioremap(res->start, resource_size(res)); start 58 drivers/gpu/drm/msm/adreno/adreno_gpu.c mem_phys = r.start; start 349 drivers/gpu/drm/msm/adreno/adreno_gpu.c ring->cur = ring->start; start 350 drivers/gpu/drm/msm/adreno/adreno_gpu.c ring->next = ring->start; start 546 drivers/gpu/drm/msm/adreno/adreno_gpu.c if (gpu->rb[i]->start[j]) start 552 drivers/gpu/drm/msm/adreno/adreno_gpu.c memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); start 572 drivers/gpu/drm/msm/adreno/adreno_gpu.c u32 start = adreno_gpu->registers[i]; start 576 drivers/gpu/drm/msm/adreno/adreno_gpu.c for (addr = start; addr <= end; addr++) { start 791 drivers/gpu/drm/msm/adreno/adreno_gpu.c uint32_t start = adreno_gpu->registers[i]; start 795 drivers/gpu/drm/msm/adreno/adreno_gpu.c for (addr = start; addr <= end; addr++) { start 807 drivers/gpu/drm/msm/adreno/adreno_gpu.c uint32_t wptr = ring->next - ring->start; start 368 drivers/gpu/drm/msm/adreno/adreno_gpu.h return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2); start 97 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c if (refcount == 1 && hw_blk->ops.start) { start 98 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c rc = hw_blk->ops.start(hw_blk); start 20 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h int (*start)(struct dpu_hw_blk *); start 92 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 
bool start = !mdp5_cstate->defer_start; start 98 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c return mdp5_ctl_commit(ctl, pipeline, flush_mask, start); start 624 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c enum mdp_mixer_stage_id start; start 673 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c start = get_start_stage(crtc, state, &pstates[0].state->base); start 678 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) { start 680 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c cnt, start); start 688 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c pstates[i].state->stage = start + i; start 528 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c u32 flush_mask, bool start) start 550 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c if (!start) { start 73 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h u32 flush_mask, bool start); start 1791 drivers/gpu/drm/msm/dsi/dsi_host.c if (cfg->io_start[i] == res->start) start 529 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c if (cfg->io_start[i] == res->start) start 140 drivers/gpu/drm/msm/hdmi/hdmi.c hdmi->mmio_phy_addr = res->start; start 141 drivers/gpu/drm/msm/msm_drv.c ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); start 340 drivers/gpu/drm/msm/msm_drv.c size = r.end - r.start; start 341 drivers/gpu/drm/msm/msm_drv.c DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); start 27 drivers/gpu/drm/msm/msm_gem.c return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + start 120 drivers/gpu/drm/msm/msm_gem_vma.c vma->iova = vma->node.start << PAGE_SHIFT; start 276 drivers/gpu/drm/msm/msm_gpu.c iter.start = offset; start 58 drivers/gpu/drm/msm/msm_gpu_trace.h u64 start, u64 end), start 59 drivers/gpu/drm/msm/msm_gpu_trace.h TP_ARGS(submit, elapsed, clock, start, end), start 77 drivers/gpu/drm/msm/msm_gpu_trace.h __entry->start_ticks = start; start 29 drivers/gpu/drm/msm/msm_ringbuffer.c ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, start 32 drivers/gpu/drm/msm/msm_ringbuffer.c if (IS_ERR(ring->start)) { start 33 drivers/gpu/drm/msm/msm_ringbuffer.c ret = PTR_ERR(ring->start); start 34 drivers/gpu/drm/msm/msm_ringbuffer.c ring->start = 0; start 40 drivers/gpu/drm/msm/msm_ringbuffer.c ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); start 41 drivers/gpu/drm/msm/msm_ringbuffer.c ring->next = ring->start; start 42 drivers/gpu/drm/msm/msm_ringbuffer.c ring->cur = ring->start; start 40 drivers/gpu/drm/msm/msm_ringbuffer.h uint32_t *start, *end, *cur, *next; start 65 drivers/gpu/drm/msm/msm_ringbuffer.h ring->next = ring->start; start 238 drivers/gpu/drm/nouveau/dispnv04/hw.h static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start) start 240 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 
0x1 : 0x3); start 177 drivers/gpu/drm/nouveau/dispnv50/disp.c .start = syncbuf + 0x0000, start 188 drivers/gpu/drm/nouveau/dispnv50/disp.c .start = 0, start 70 drivers/gpu/drm/nouveau/dispnv50/wndw.c args.base.start = 0; start 19 drivers/gpu/drm/nouveau/include/nvif/cl0002.h __u64 start; start 20 drivers/gpu/drm/nouveau/include/nvif/vmm.h u64 start; start 15 drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h u64 start; start 86 drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h void (*start)(struct nvkm_falcon *); start 33 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h u64 start; start 550 drivers/gpu/drm/nouveau/nouveau_abi16.c args.start = ntfy->node->offset; start 555 drivers/gpu/drm/nouveau/nouveau_abi16.c args.start += chan->ntfy_vma->addr; start 561 drivers/gpu/drm/nouveau/nouveau_abi16.c args.start += drm->agp.base + chan->ntfy->bo.offset; start 566 drivers/gpu/drm/nouveau/nouveau_abi16.c args.start += chan->ntfy->bo.offset; start 1049 drivers/gpu/drm/nouveau/nouveau_bo.c u32 src_offset = old_reg->start << PAGE_SHIFT; start 1050 drivers/gpu/drm/nouveau/nouveau_bo.c u32 dst_offset = new_reg->start << PAGE_SHIFT; start 1335 drivers/gpu/drm/nouveau/nouveau_bo.c u64 offset = new_reg->start << PAGE_SHIFT; start 1459 drivers/gpu/drm/nouveau/nouveau_bo.c reg->bus.offset = reg->start << PAGE_SHIFT; start 1469 drivers/gpu/drm/nouveau/nouveau_bo.c reg->bus.offset = reg->start << PAGE_SHIFT; start 1566 drivers/gpu/drm/nouveau/nouveau_bo.c bo->mem.start + bo->mem.num_pages < mappable) start 180 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 191 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = nvxx_device(device)->func-> start 193 drivers/gpu/drm/nouveau/nouveau_chan.c args.limit = args.start + device->info.ram_user - 1; start 197 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 204 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = chan->drm->agp.base; start 210 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 382 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 387 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 399 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 405 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = chan->drm->agp.base; start 411 drivers/gpu/drm/nouveau/nouveau_chan.c args.start = 0; start 175 drivers/gpu/drm/nouveau/nouveau_dmem.c .start = vmf->address, start 532 drivers/gpu/drm/nouveau/nouveau_dmem.c pfn_first = res->start >> PAGE_SHIFT; start 597 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long addr = args->start, nr_dma = 0, i; start 625 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long start, start 628 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long npages = (end - start) >> PAGE_SHIFT; start 633 drivers/gpu/drm/nouveau/nouveau_dmem.c .start = start, start 651 drivers/gpu/drm/nouveau/nouveau_dmem.c args.end = start + (c << PAGE_SHIFT); start 658 drivers/gpu/drm/nouveau/nouveau_dmem.c args.start = args.end; start 684 drivers/gpu/drm/nouveau/nouveau_dmem.c npages = (range->end - range->start) >> PAGE_SHIFT; start 38 drivers/gpu/drm/nouveau/nouveau_dmem.h unsigned long start, start 402 drivers/gpu/drm/nouveau/nouveau_drm.c .start = drm->notify->addr, start 171 drivers/gpu/drm/nouveau/nouveau_mem.c reg->start = mem->mem.addr >> PAGE_SHIFT; start 93 drivers/gpu/drm/nouveau/nouveau_svm.c unsigned long start; start 245 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) start 247 drivers/gpu/drm/nouveau/nouveau_svm.c if 
(limit > start) { start 252 drivers/gpu/drm/nouveau/nouveau_svm.c .addr = start, start 253 drivers/gpu/drm/nouveau/nouveau_svm.c .size = limit - start, start 264 drivers/gpu/drm/nouveau/nouveau_svm.c unsigned long start = update->start; start 270 drivers/gpu/drm/nouveau/nouveau_svm.c SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); start 273 drivers/gpu/drm/nouveau/nouveau_svm.c if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { start 274 drivers/gpu/drm/nouveau/nouveau_svm.c if (start < svmm->unmanaged.start) { start 275 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svmm_invalidate(svmm, start, start 278 drivers/gpu/drm/nouveau/nouveau_svm.c start = svmm->unmanaged.limit; start 281 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svmm_invalidate(svmm, start, limit); start 321 drivers/gpu/drm/nouveau/nouveau_svm.c svmm->unmanaged.start = args->unmanaged_addr; start 542 drivers/gpu/drm/nouveau/nouveau_svm.c u64 inst, start, limit; start 606 drivers/gpu/drm/nouveau/nouveau_svm.c start = buffer->fault[fi]->addr; start 607 drivers/gpu/drm/nouveau/nouveau_svm.c limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT); start 608 drivers/gpu/drm/nouveau/nouveau_svm.c if (start < svmm->unmanaged.limit) start 609 drivers/gpu/drm/nouveau/nouveau_svm.c limit = min_t(u64, limit, svmm->unmanaged.start); start 611 drivers/gpu/drm/nouveau/nouveau_svm.c if (limit > svmm->unmanaged.start) start 612 drivers/gpu/drm/nouveau/nouveau_svm.c start = max_t(u64, start, svmm->unmanaged.limit); start 613 drivers/gpu/drm/nouveau/nouveau_svm.c SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); start 619 drivers/gpu/drm/nouveau/nouveau_svm.c vma = find_vma_intersection(svmm->mm, start, limit); start 621 drivers/gpu/drm/nouveau/nouveau_svm.c SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit); start 626 drivers/gpu/drm/nouveau/nouveau_svm.c start = max_t(u64, start, vma->vm_start); start 628 drivers/gpu/drm/nouveau/nouveau_svm.c SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); start 630 drivers/gpu/drm/nouveau/nouveau_svm.c if (buffer->fault[fi]->addr != start) { start 642 drivers/gpu/drm/nouveau/nouveau_svm.c args.i.p.addr = start; start 689 drivers/gpu/drm/nouveau/nouveau_svm.c range.start = args.i.p.addr; start 723 drivers/gpu/drm/nouveau/nouveau_svm.c pi = (fault->addr - range.start) >> PAGE_SHIFT; start 110 drivers/gpu/drm/nouveau/nouveau_ttm.c reg->start = 0; start 149 drivers/gpu/drm/nouveau/nouveau_ttm.c reg->start = mem->vma[0].addr >> PAGE_SHIFT; start 80 drivers/gpu/drm/nouveau/nv17_fence.c u32 start = reg->start * PAGE_SIZE; start 81 drivers/gpu/drm/nouveau/nv17_fence.c u32 limit = start + reg->size - 1; start 97 drivers/gpu/drm/nouveau/nv17_fence.c .start = start, start 41 drivers/gpu/drm/nouveau/nv50_fence.c u32 start = reg->start * PAGE_SIZE; start 42 drivers/gpu/drm/nouveau/nv50_fence.c u32 limit = start + reg->size - 1; start 58 drivers/gpu/drm/nouveau/nv50_fence.c .start = start, start 138 drivers/gpu/drm/nouveau/nvif/vmm.c vmm->start = args->addr; start 203 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c return res ? 
res->start : 0; start 93 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c args->v0.start, args->v0.limit); start 96 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c dmaobj->start = args->v0.start; start 104 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c if (dmaobj->start > dmaobj->limit) start 53 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start)); start 55 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c upper_32_bits(dmaobj->base.start)); start 51 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8); start 43 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c u64 start = dmaobj->base.start >> 8; start 51 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(start)); start 52 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c nvkm_wo32(*pgpuobj, 0x08, upper_32_bits(start)); start 46 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c u64 offset = dmaobj->base.start & 0xfffff000; start 47 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c u64 adjust = dmaobj->base.start & 0x00000fff; start 48 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c u32 length = dmaobj->base.limit - dmaobj->base.start; start 54 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c if (!dmaobj->base.start) start 53 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start)); start 55 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c upper_32_bits(dmaobj->base.start)); start 56 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c return fifo->func->start(fifo, flags); start 138 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c u64 limit, start; start 145 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c start = engn->addr; start 150 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start)); start 152 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c upper_32_bits(start)); start 107 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c u64 limit, start; start 114 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c start = engn->addr; start 119 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start)); start 121 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c upper_32_bits(start)); start 48 drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c .start = nv04_fifo_start, start 353 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c .start = nv04_fifo_start, start 47 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c .start = nv04_fifo_start, start 85 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c .start = nv04_fifo_start, start 116 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c .start = nv04_fifo_start, start 135 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c .start = nv04_fifo_start, start 27 drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h void (*start)(struct nvkm_fifo *, unsigned long *); start 124 drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c u64 start; start 130 drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c start = nvkm_timer_read(tmr); start 149 drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c !(timeout = nvkm_timer_read(tmr) - start > 2000000000)); start 27 drivers/gpu/drm/nouveau/nvkm/falcon/base.c nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, start 36 drivers/gpu/drm/nouveau/nvkm/falcon/base.c falcon->func->load_imem(falcon, data, start, size, tag, port, start 41 
drivers/gpu/drm/nouveau/nvkm/falcon/base.c nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, start 46 drivers/gpu/drm/nouveau/nvkm/falcon/base.c falcon->func->load_dmem(falcon, data, start, size, port); start 52 drivers/gpu/drm/nouveau/nvkm/falcon/base.c nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port, start 57 drivers/gpu/drm/nouveau/nvkm/falcon/base.c falcon->func->read_dmem(falcon, start, size, port, data); start 83 drivers/gpu/drm/nouveau/nvkm/falcon/base.c falcon->func->start(falcon); start 29 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, start 38 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c reg = start | BIT(24) | (secure ? BIT(28) : 0); start 68 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start, start 76 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24)); start 95 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, start 101 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c if (start >= EMEM_START_ADDR && falcon->has_emem) start 103 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c start - EMEM_START_ADDR, size, start 108 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24)); start 125 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size, start 133 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25)); start 152 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, start 158 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c if (start >= EMEM_START_ADDR && falcon->has_emem) start 159 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR, start 164 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25)); start 369 drivers/gpu/drm/nouveau/nvkm/falcon/v1.c .start = nvkm_falcon_v1_start, start 112 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c u64 start, limit, size; start 129 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c start = 0x0100000000ULL; start 133 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c limit = start + size; start 135 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0, start 158 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start)); start 160 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c upper_32_bits(start)); start 169 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c start = 0x0000000000ULL; start 173 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c limit = start + size; start 175 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0, start 194 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start)); start 196 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c upper_32_bits(start)); start 42 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c const u32 start = bios->size; start 45 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c u32 read = mthd->func->read(data, start, limit - start, bios); start 46 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c bios->size = start + read; start 50 
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c u32 start = offset & ~0x00000fff; start 51 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c u32 fetch = limit - start; start 54 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch); start 71 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c u32 start = offset & ~0xfff; start 75 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c while (start + fetch < limit) { start 77 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c start + fetch, start 31 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit) start 34 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c nvkm_wr32(device, 0x17e8cc, start); start 30 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit) start 33 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c nvkm_wr32(device, 0x17e270, start); start 16 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h void (*cbc_clear)(struct nvkm_ltc *, u32 start, u32 limit); start 416 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c args->v0.addr = uvmm->vmm->start; start 634 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 start = addr; start 664 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if ((size = addr - start)) start 665 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_sparse(vmm, start, size, false); start 993 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c const u64 limit = vmm->limit - vmm->start; start 999 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_put(vmm, page, vmm->start, limit); start 1098 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vmm->start = 0; start 1124 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vmm->start = addr; start 1126 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits)) start 1129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start))) start 1179 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 start = addr; start 1188 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c size = min(limit - start, vma->size - (start - vma->addr)); start 1191 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c start, size, false, true); start 1193 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false); start 1199 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c } while ((vma = node(vma, next)) && (start = vma->addr) < limit); start 1216 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 start = addr; start 1241 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 size = limit - start; start 1242 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 addr = start; start 1315 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c start += size; start 1328 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c } while (vma && start < limit); start 1818 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c const u64 limit = vmm->limit - vmm->start; start 1824 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit); start 1828 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false, start 353 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) { start 76 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h struct {u32 start; u32 size; } load_ovl[64]; 
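The shadowacpi entries above align the requested offset down to a 4 KiB boundary and then pull the ROM image in fixed-size chunks until the limit is reached. A minimal sketch of that arithmetic follows; read_chunk() is a hypothetical callback standing in for nouveau_acpi_get_bios_chunk(), and the 4 KiB granularity is assumed from the "& ~0xfff" masking in those lines.

#include <stdint.h>

typedef int (*read_chunk_fn)(uint8_t *dst, uint32_t offset, uint32_t len);

/* Fetch [offset, limit) through read_chunk(), 4 KiB-aligned as in shadowacpi. */
static int fetch_aligned(uint8_t *buf, uint32_t offset, uint32_t limit,
                         read_chunk_fn read_chunk)
{
	uint32_t start = offset & ~0xfffu;   /* align down to a 4 KiB page */
	const uint32_t chunk = 0x1000;       /* assumed fetch granularity */

	while (start < limit) {
		uint32_t len = limit - start < chunk ? limit - start : chunk;
		int ret = read_chunk(buf + start, start, len);

		if (ret < 0)
			return ret;
		start += len;
	}
	return 0;
}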
start 130 drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c u64 start, end, tach; start 142 drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c start = nvkm_timer_read(tmr); start 152 drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c if (!start) start 153 drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c start = nvkm_timer_read(tmr); start 157 drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c } while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000); start 162 drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c do_div(tach, (end - start)); start 50 drivers/gpu/drm/omapdrm/dss/dispc.c #define REG_GET(dispc, idx, start, end) \ start 51 drivers/gpu/drm/omapdrm/dss/dispc.c FLD_GET(dispc_read_reg(dispc, idx), start, end) start 53 drivers/gpu/drm/omapdrm/dss/dispc.c #define REG_FLD_MOD(dispc, idx, val, start, end) \ start 55 drivers/gpu/drm/omapdrm/dss/dispc.c FLD_MOD(dispc_read_reg(dispc, idx), val, start, end)) start 404 drivers/gpu/drm/omapdrm/dss/dispc.c u8 *start, u8 *end) start 409 drivers/gpu/drm/omapdrm/dss/dispc.c *start = dispc->feat->reg_fields[id].start; start 1374 drivers/gpu/drm/omapdrm/dss/dispc.c u8 start, end; start 1380 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_get_reg_field(dispc, FEAT_REG_FIFOSIZE, &start, &end); start 1384 drivers/gpu/drm/omapdrm/dss/dispc.c start, end); start 111 drivers/gpu/drm/omapdrm/dss/dsi.c #define REG_GET(dsi, idx, start, end) \ start 112 drivers/gpu/drm/omapdrm/dss/dsi.c FLD_GET(dsi_read_reg(dsi, idx), start, end) start 114 drivers/gpu/drm/omapdrm/dss/dsi.c #define REG_FLD_MOD(dsi, idx, val, start, end) \ start 115 drivers/gpu/drm/omapdrm/dss/dsi.c dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end)) start 1149 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_FLD_GET(fld, start, end)\ start 1150 drivers/gpu/drm/omapdrm/dss/dsi.c FLD_GET(dsi_read_reg(dsi, DSI_##fld), start, end) start 5341 drivers/gpu/drm/omapdrm/dss/dsi.c while (d->address != 0 && d->address != dsi_mem->start) start 54 drivers/gpu/drm/omapdrm/dss/dss.c #define REG_GET(dss, idx, start, end) \ start 55 drivers/gpu/drm/omapdrm/dss/dss.c FLD_GET(dss_read_reg(dss, idx), start, end) start 57 drivers/gpu/drm/omapdrm/dss/dss.c #define REG_FLD_MOD(dss, idx, val, start, end) \ start 59 drivers/gpu/drm/omapdrm/dss/dss.c FLD_MOD(dss_read_reg(dss, idx), val, start, end)) start 432 drivers/gpu/drm/omapdrm/dss/dss.c dss->feat->dispc_clk_switch.start, start 64 drivers/gpu/drm/omapdrm/dss/dss.h #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) start 65 drivers/gpu/drm/omapdrm/dss/dss.h #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) start 66 drivers/gpu/drm/omapdrm/dss/dss.h #define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) start 67 drivers/gpu/drm/omapdrm/dss/dss.h #define FLD_MOD(orig, val, start, end) \ start 68 drivers/gpu/drm/omapdrm/dss/dss.h (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) start 196 drivers/gpu/drm/omapdrm/dss/dss.h u8 start, end; start 276 drivers/gpu/drm/omapdrm/dss/hdmi.h #define REG_FLD_MOD(base, idx, val, start, end) \ start 278 drivers/gpu/drm/omapdrm/dss/hdmi.h val, start, end)) start 279 drivers/gpu/drm/omapdrm/dss/hdmi.h #define REG_GET(base, idx, start, end) \ start 280 drivers/gpu/drm/omapdrm/dss/hdmi.h FLD_GET(hdmi_read_reg(base, idx), start, end) start 288 drivers/gpu/drm/omapdrm/dss/hdmi_wp.c wp->phys_base = res->start; start 25 drivers/gpu/drm/omapdrm/dss/video-pll.c #define REG_MOD(reg, val, start, end) \ start 26 drivers/gpu/drm/omapdrm/dss/video-pll.c 
writel_relaxed(FLD_MOD(readl_relaxed(reg), val, start, end), reg) start 97 drivers/gpu/drm/omapdrm/omap_dmm_priv.h u32 start:4; start 376 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c .start = 1, start 816 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c omap_dmm->phys_base = mem->start; start 817 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c omap_dmm->base = ioremap(mem->start, SZ_2K); start 1129 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c bool start = read_map_pt(map, xdiv, start 1139 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c start ? '<' : 'X'); start 106 drivers/gpu/drm/panfrost/panfrost_drv.c args->offset = mapping->mmnode.start << PAGE_SHIFT; start 380 drivers/gpu/drm/panfrost/panfrost_drv.c args->offset = mapping->mmnode.start << PAGE_SHIFT; start 459 drivers/gpu/drm/panfrost/panfrost_drv.c u64 *start, u64 *end) start 465 drivers/gpu/drm/panfrost/panfrost_drv.c if ((*start & PFN_4G_MASK) == 0) start 466 drivers/gpu/drm/panfrost/panfrost_drv.c (*start)++; start 471 drivers/gpu/drm/panfrost/panfrost_drv.c next_seg = ALIGN(*start, PFN_4G); start 472 drivers/gpu/drm/panfrost/panfrost_drv.c if (next_seg - *start <= PFN_16M) start 473 drivers/gpu/drm/panfrost/panfrost_drv.c *start = next_seg + 1; start 475 drivers/gpu/drm/panfrost/panfrost_drv.c *end = min(*end, ALIGN(*start, PFN_4G) - 1); start 295 drivers/gpu/drm/panfrost/panfrost_mmu.c mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, start 308 drivers/gpu/drm/panfrost/panfrost_mmu.c u64 iova = mapping->mmnode.start << PAGE_SHIFT; start 331 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping->mmnode.start << PAGE_SHIFT, len); start 429 drivers/gpu/drm/panfrost/panfrost_mmu.c if (offset >= node->start && start 430 drivers/gpu/drm/panfrost/panfrost_mmu.c offset < (node->start + node->size)) { start 464 drivers/gpu/drm/panfrost/panfrost_mmu.c bomapping->mmnode.start << PAGE_SHIFT); start 473 drivers/gpu/drm/panfrost/panfrost_mmu.c page_offset -= bomapping->mmnode.start; start 52 drivers/gpu/drm/panfrost/panfrost_perfcnt.c gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; start 617 drivers/gpu/drm/qxl/qxl_cmd.c int start = 0; start 623 drivers/gpu/drm/qxl/qxl_cmd.c start = qdev->last_alloced_surf_id + 1; start 626 drivers/gpu/drm/qxl/qxl_cmd.c for (i = start; i < start + qdev->rom->n_surfaces; i++) { start 182 drivers/gpu/drm/qxl/qxl_ttm.c mem->bus.offset = mem->start << PAGE_SHIFT; start 187 drivers/gpu/drm/qxl/qxl_ttm.c mem->bus.offset = mem->start << PAGE_SHIFT; start 214 drivers/gpu/drm/qxl/qxl_ttm.c gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); start 546 drivers/gpu/drm/r128/r128_cce.c dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle; start 75 drivers/gpu/drm/r128/r128_drv.h u32 *start; start 497 drivers/gpu/drm/r128/r128_drv.h ring = dev_priv->ring.start; \ start 515 drivers/gpu/drm/r128/r128_drv.h dev_priv->ring.start, \ start 641 drivers/gpu/drm/r128/r128_state.c struct drm_buf *buf, int start, int end) start 646 drivers/gpu/drm/r128/r128_state.c DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); start 648 drivers/gpu/drm/r128/r128_state.c if (start != end) { start 649 drivers/gpu/drm/r128/r128_state.c int offset = buf->bus_address + start; start 650 drivers/gpu/drm/r128/r128_state.c int dwords = (end - start + 3) / sizeof(u32); start 659 drivers/gpu/drm/r128/r128_state.c + buf->offset + start); start 697 drivers/gpu/drm/r128/r128_state.c int start, int end, int count) start 709 drivers/gpu/drm/r128/r128_state.c DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count); start 714 
drivers/gpu/drm/r128/r128_state.c if (start != end) { start 720 drivers/gpu/drm/r128/r128_state.c dwords = (end - start + 3) / sizeof(u32); start 723 drivers/gpu/drm/r128/r128_state.c + buf->offset + start); start 750 drivers/gpu/drm/r128/r128_state.c r128_cce_dispatch_indirect(dev, buf, start, end); start 1380 drivers/gpu/drm/r128/r128_state.c elts->idx, elts->start, elts->end, elts->discard); start 1409 drivers/gpu/drm/r128/r128_state.c count = (elts->end - elts->start) / sizeof(u16); start 1410 drivers/gpu/drm/r128/r128_state.c elts->start -= R128_INDEX_PRIM_OFFSET; start 1412 drivers/gpu/drm/r128/r128_state.c if (elts->start & 0x7) { start 1413 drivers/gpu/drm/r128/r128_state.c DRM_ERROR("misaligned buffer 0x%x\n", elts->start); start 1416 drivers/gpu/drm/r128/r128_state.c if (elts->start < buf->used) { start 1417 drivers/gpu/drm/r128/r128_state.c DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); start 1425 drivers/gpu/drm/r128/r128_state.c r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count); start 1528 drivers/gpu/drm/r128/r128_state.c indirect->idx, indirect->start, indirect->end, start 1550 drivers/gpu/drm/r128/r128_state.c if (indirect->start < buf->used) { start 1552 drivers/gpu/drm/r128/r128_state.c indirect->start, buf->used); start 1575 drivers/gpu/drm/r128/r128_state.c r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end); start 63 drivers/gpu/drm/radeon/atom.c uint16_t start; start 728 drivers/gpu/drm/radeon/atom.c if (ctx->last_jump == (ctx->start + target)) { start 741 drivers/gpu/drm/radeon/atom.c ctx->last_jump = ctx->start + target; start 744 drivers/gpu/drm/radeon/atom.c *ptr = ctx->start + target; start 841 drivers/gpu/drm/radeon/atom.c ctx->ctx->data_block = ctx->start; start 988 drivers/gpu/drm/radeon/atom.c *ptr = ctx->start + target; start 1178 drivers/gpu/drm/radeon/atom.c ectx.start = base; start 3397 drivers/gpu/drm/radeon/radeon_atombios.c u8 *start = (u8 *)v1; start 3400 drivers/gpu/drm/radeon/radeon_atombios.c ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset); start 3414 drivers/gpu/drm/radeon/radeon_atombios.c u8 *start = (u8*)v2; start 3417 drivers/gpu/drm/radeon/radeon_atombios.c ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset); start 3431 drivers/gpu/drm/radeon/radeon_atombios.c u8 *start = (u8*)v3; start 3434 drivers/gpu/drm/radeon/radeon_atombios.c ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); start 586 drivers/gpu/drm/radeon/radeon_gem.c if (bo_va->it.start) start 683 drivers/gpu/drm/radeon/radeon_gem.c if (bo_va->it.start) { start 685 drivers/gpu/drm/radeon/radeon_gem.c args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; start 83 drivers/gpu/drm/radeon/radeon_mn.c it = interval_tree_iter_first(&rmn->objects, range->start, end); start 95 drivers/gpu/drm/radeon/radeon_mn.c it = interval_tree_iter_next(it, range->start, end); start 132 drivers/gpu/drm/radeon/radeon_mn.c .start = 0, start 197 drivers/gpu/drm/radeon/radeon_mn.c addr = min(it->start, addr); start 212 drivers/gpu/drm/radeon/radeon_mn.c node->it.start = addr; start 655 drivers/gpu/drm/radeon/radeon_object.c bo->tbo.mem.start << PAGE_SHIFT, start 814 drivers/gpu/drm/radeon/radeon_object.c offset = bo->mem.start << PAGE_SHIFT; start 839 drivers/gpu/drm/radeon/radeon_object.c offset = bo->mem.start << PAGE_SHIFT; start 75 drivers/gpu/drm/radeon/radeon_trace.h __entry->soffset = bo_va->it.start; start 149 drivers/gpu/drm/radeon/radeon_ttm.c bo->mem.start < 
(rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { start 214 drivers/gpu/drm/radeon/radeon_ttm.c old_start = (u64)old_mem->start << PAGE_SHIFT; start 215 drivers/gpu/drm/radeon/radeon_ttm.c new_start = (u64)new_mem->start << PAGE_SHIFT; start 422 drivers/gpu/drm/radeon/radeon_ttm.c mem->bus.offset = mem->start << PAGE_SHIFT; start 429 drivers/gpu/drm/radeon/radeon_ttm.c mem->bus.offset = mem->start << PAGE_SHIFT; start 589 drivers/gpu/drm/radeon/radeon_ttm.c gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); start 579 drivers/gpu/drm/radeon/radeon_uvd.c uint64_t start, end; start 592 drivers/gpu/drm/radeon/radeon_uvd.c start = reloc->gpu_offset; start 593 drivers/gpu/drm/radeon/radeon_uvd.c end = start + radeon_bo_size(reloc->robj); start 594 drivers/gpu/drm/radeon/radeon_uvd.c start += offset; start 596 drivers/gpu/drm/radeon/radeon_uvd.c p->ib.ptr[data0] = start & 0xFFFFFFFF; start 597 drivers/gpu/drm/radeon/radeon_uvd.c p->ib.ptr[data1] = start >> 32; start 602 drivers/gpu/drm/radeon/radeon_uvd.c if (end <= start) { start 606 drivers/gpu/drm/radeon/radeon_uvd.c if ((end - start) < buf_sizes[cmd]) { start 608 drivers/gpu/drm/radeon/radeon_uvd.c (unsigned)(end - start), buf_sizes[cmd]); start 617 drivers/gpu/drm/radeon/radeon_uvd.c if ((start >> 28) != ((end - 1) >> 28)) { start 619 drivers/gpu/drm/radeon/radeon_uvd.c start, end); start 625 drivers/gpu/drm/radeon/radeon_uvd.c (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { start 627 drivers/gpu/drm/radeon/radeon_uvd.c start, end); start 62 drivers/gpu/drm/radeon/radeon_vce.c uint8_t start, mid, end; start 107 drivers/gpu/drm/radeon/radeon_vce.c if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3) start 126 drivers/gpu/drm/radeon/radeon_vce.c start, mid, end, rdev->vce.fb_version); start 128 drivers/gpu/drm/radeon/radeon_vce.c rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); start 475 drivers/gpu/drm/radeon/radeon_vce.c uint64_t start, end, offset; start 489 drivers/gpu/drm/radeon/radeon_vce.c start = reloc->gpu_offset; start 490 drivers/gpu/drm/radeon/radeon_vce.c end = start + radeon_bo_size(reloc->robj); start 491 drivers/gpu/drm/radeon/radeon_vce.c start += offset; start 493 drivers/gpu/drm/radeon/radeon_vce.c p->ib.ptr[lo] = start & 0xFFFFFFFF; start 494 drivers/gpu/drm/radeon/radeon_vce.c p->ib.ptr[hi] = start >> 32; start 496 drivers/gpu/drm/radeon/radeon_vce.c if (end <= start) { start 500 drivers/gpu/drm/radeon/radeon_vce.c if ((end - start) < size) { start 502 drivers/gpu/drm/radeon/radeon_vce.c (unsigned)(end - start), size); start 331 drivers/gpu/drm/radeon/radeon_vm.c bo_va->it.start = 0; start 489 drivers/gpu/drm/radeon/radeon_vm.c soffset, tmp->bo, tmp->it.start, tmp->it.last); start 496 drivers/gpu/drm/radeon/radeon_vm.c if (bo_va->it.start || bo_va->it.last) { start 505 drivers/gpu/drm/radeon/radeon_vm.c tmp->it.start = bo_va->it.start; start 512 drivers/gpu/drm/radeon/radeon_vm.c bo_va->it.start = 0; start 521 drivers/gpu/drm/radeon/radeon_vm.c bo_va->it.start = soffset; start 817 drivers/gpu/drm/radeon/radeon_vm.c uint64_t start, uint64_t end, start 826 drivers/gpu/drm/radeon/radeon_vm.c for (addr = start; addr < end; ) { start 887 drivers/gpu/drm/radeon/radeon_vm.c uint64_t start, uint64_t end, start 892 drivers/gpu/drm/radeon/radeon_vm.c start >>= radeon_vm_block_size; start 895 drivers/gpu/drm/radeon/radeon_vm.c for (i = start; i <= end; ++i) start 923 drivers/gpu/drm/radeon/radeon_vm.c if (!bo_va->it.start) { start 949 drivers/gpu/drm/radeon/radeon_vm.c addr = (u64)mem->start << 
PAGE_SHIFT; start 967 drivers/gpu/drm/radeon/radeon_vm.c nptes = bo_va->it.last - bo_va->it.start + 1; start 1012 drivers/gpu/drm/radeon/radeon_vm.c r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, start 1029 drivers/gpu/drm/radeon/radeon_vm.c radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); start 1127 drivers/gpu/drm/radeon/radeon_vm.c if (bo_va->it.start || bo_va->it.last) start 1132 drivers/gpu/drm/radeon/radeon_vm.c if (bo_va->it.start || bo_va->it.last) { start 1161 drivers/gpu/drm/radeon/radeon_vm.c (bo_va->it.start || bo_va->it.last)) start 203 drivers/gpu/drm/rcar-du/rcar_du_group.c static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) start 219 drivers/gpu/drm/rcar-du/rcar_du_group.c start ? DSYSR_DEN : DSYSR_DRES); start 222 drivers/gpu/drm/rcar-du/rcar_du_group.c start ? DSYSR_DEN : DSYSR_DRES); start 226 drivers/gpu/drm/rcar-du/rcar_du_group.c void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) start 240 drivers/gpu/drm/rcar-du/rcar_du_group.c if (start) { start 57 drivers/gpu/drm/rcar-du/rcar_du_group.h void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start); start 296 drivers/gpu/drm/rcar-du/rcar_du_of.c if (lvds_data[i].res.start == res.start) start 910 drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c if (cdata[i].reg == res->start) { start 79 drivers/gpu/drm/rockchip/rockchip_drm_drv.c u64 start, end; start 89 drivers/gpu/drm/rockchip/rockchip_drm_drv.c start = geometry->aperture_start; start 93 drivers/gpu/drm/rockchip/rockchip_drm_drv.c start, end); start 94 drivers/gpu/drm/rockchip/rockchip_drm_drv.c drm_mm_init(&private->mm, start, end - start + 1); start 36 drivers/gpu/drm/rockchip/rockchip_drm_gem.c rk_obj->dma_addr = rk_obj->mm.start; start 124 drivers/gpu/drm/savage/savage_state.c if(start <= reg && start+count > reg) \ start 125 drivers/gpu/drm/savage/savage_state.c dev_priv->state.where = regs[reg - start] start 127 drivers/gpu/drm/savage/savage_state.c if(start <= reg && start+count > reg) { \ start 129 drivers/gpu/drm/savage/savage_state.c tmp = regs[reg - start]; \ start 136 drivers/gpu/drm/savage/savage_state.c unsigned int start, unsigned int count, start 139 drivers/gpu/drm/savage/savage_state.c if (start < SAVAGE_TEXPALADDR_S3D || start 140 drivers/gpu/drm/savage/savage_state.c start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { start 142 drivers/gpu/drm/savage/savage_state.c start, start + count - 1); start 152 drivers/gpu/drm/savage/savage_state.c if (start <= SAVAGE_TEXCTRL_S3D && start 153 drivers/gpu/drm/savage/savage_state.c start + count > SAVAGE_TEXPALADDR_S3D) { start 166 drivers/gpu/drm/savage/savage_state.c unsigned int start, unsigned int count, start 171 drivers/gpu/drm/savage/savage_state.c if (start < SAVAGE_DRAWLOCALCTRL_S4 || start 172 drivers/gpu/drm/savage/savage_state.c start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { start 174 drivers/gpu/drm/savage/savage_state.c start, start + count - 1); start 184 drivers/gpu/drm/savage/savage_state.c if (start <= SAVAGE_TEXDESCR_S4 && start 185 drivers/gpu/drm/savage/savage_state.c start + count > SAVAGE_TEXPALADDR_S4) { start 209 drivers/gpu/drm/savage/savage_state.c unsigned int start = cmd_header->state.start; start 219 drivers/gpu/drm/savage/savage_state.c ret = savage_verify_state_s3d(dev_priv, start, count, regs); start 223 drivers/gpu/drm/savage/savage_state.c if (start < SAVAGE_SCSTART_S3D) { start 224 drivers/gpu/drm/savage/savage_state.c if (start + count > SAVAGE_SCEND_S3D + 1) start 225 
drivers/gpu/drm/savage/savage_state.c count2 = count - (SAVAGE_SCEND_S3D + 1 - start); start 226 drivers/gpu/drm/savage/savage_state.c if (start + count > SAVAGE_SCSTART_S3D) start 227 drivers/gpu/drm/savage/savage_state.c count = SAVAGE_SCSTART_S3D - start; start 228 drivers/gpu/drm/savage/savage_state.c } else if (start <= SAVAGE_SCEND_S3D) { start 229 drivers/gpu/drm/savage/savage_state.c if (start + count > SAVAGE_SCEND_S3D + 1) { start 230 drivers/gpu/drm/savage/savage_state.c count -= SAVAGE_SCEND_S3D + 1 - start; start 231 drivers/gpu/drm/savage/savage_state.c start = SAVAGE_SCEND_S3D + 1; start 236 drivers/gpu/drm/savage/savage_state.c ret = savage_verify_state_s4(dev_priv, start, count, regs); start 240 drivers/gpu/drm/savage/savage_state.c if (start < SAVAGE_DRAWCTRL0_S4) { start 241 drivers/gpu/drm/savage/savage_state.c if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) start 243 drivers/gpu/drm/savage/savage_state.c (SAVAGE_DRAWCTRL1_S4 + 1 - start); start 244 drivers/gpu/drm/savage/savage_state.c if (start + count > SAVAGE_DRAWCTRL0_S4) start 245 drivers/gpu/drm/savage/savage_state.c count = SAVAGE_DRAWCTRL0_S4 - start; start 246 drivers/gpu/drm/savage/savage_state.c } else if (start <= SAVAGE_DRAWCTRL1_S4) { start 247 drivers/gpu/drm/savage/savage_state.c if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) { start 248 drivers/gpu/drm/savage/savage_state.c count -= SAVAGE_DRAWCTRL1_S4 + 1 - start; start 249 drivers/gpu/drm/savage/savage_state.c start = SAVAGE_DRAWCTRL1_S4 + 1; start 268 drivers/gpu/drm/savage/savage_state.c DMA_SET_REGISTERS(start, n); start 271 drivers/gpu/drm/savage/savage_state.c start += n; start 274 drivers/gpu/drm/savage/savage_state.c start += 2; start 293 drivers/gpu/drm/savage/savage_state.c unsigned int start = cmd_header->prim.start; start 350 drivers/gpu/drm/savage/savage_state.c if (start + n > dmabuf->total / 32) { start 352 drivers/gpu/drm/savage/savage_state.c start, start + n - 1, dmabuf->total / 32); start 388 drivers/gpu/drm/savage/savage_state.c reorder[start % 3] = 2; start 391 drivers/gpu/drm/savage/savage_state.c BCI_DRAW_INDICES_S3D(count, prim, start + 2); start 393 drivers/gpu/drm/savage/savage_state.c for (i = start + 1; i + 1 < start + count; i += 2) start 397 drivers/gpu/drm/savage/savage_state.c if (i < start + count) start 401 drivers/gpu/drm/savage/savage_state.c BCI_DRAW_INDICES_S3D(count, prim, start); start 403 drivers/gpu/drm/savage/savage_state.c for (i = start + 1; i + 1 < start + count; i += 2) start 405 drivers/gpu/drm/savage/savage_state.c if (i < start + count) start 411 drivers/gpu/drm/savage/savage_state.c for (i = start; i + 1 < start + count; i += 2) start 413 drivers/gpu/drm/savage/savage_state.c if (i < start + count) start 417 drivers/gpu/drm/savage/savage_state.c start += count; start 435 drivers/gpu/drm/savage/savage_state.c unsigned int start = cmd_header->prim.start; start 493 drivers/gpu/drm/savage/savage_state.c if (start + n > vb_size / (vb_stride * 4)) { start 495 drivers/gpu/drm/savage/savage_state.c start, start + n - 1, vb_size / (vb_stride * 4)); start 508 drivers/gpu/drm/savage/savage_state.c reorder[start % 3] = 2; start 513 drivers/gpu/drm/savage/savage_state.c for (i = start; i < start + count; ++i) { start 524 drivers/gpu/drm/savage/savage_state.c DMA_COPY(&vtxbuf[vb_stride * start], start 527 drivers/gpu/drm/savage/savage_state.c for (i = start; i < start + count; ++i) { start 536 drivers/gpu/drm/savage/savage_state.c start += count; start 904 drivers/gpu/drm/savage/savage_state.c const 
drm_savage_cmd_header_t *start, start 919 drivers/gpu/drm/savage/savage_state.c cmdbuf = start; start 77 drivers/gpu/drm/selftests/test-drm_mm.c static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end) start 84 drivers/gpu/drm/selftests/test-drm_mm.c if (end <= start) start 89 drivers/gpu/drm/selftests/test-drm_mm.c if (start != hole_start || end != hole_end) { start 93 drivers/gpu/drm/selftests/test-drm_mm.c start, end); start 118 drivers/gpu/drm/selftests/test-drm_mm.c if (node->start != addr) { start 120 drivers/gpu/drm/selftests/test-drm_mm.c n, addr, node->start); start 139 drivers/gpu/drm/selftests/test-drm_mm.c node->start, check->start); start 164 drivers/gpu/drm/selftests/test-drm_mm.c div64_u64_rem(node->start, alignment, &rem); start 186 drivers/gpu/drm/selftests/test-drm_mm.c node->start, misalignment(node, alignment), alignment); start 236 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start = 0; start 277 drivers/gpu/drm/selftests/test-drm_mm.c nodes[0].start = 512; start 282 drivers/gpu/drm/selftests/test-drm_mm.c nodes[0].start, nodes[0].size); start 287 drivers/gpu/drm/selftests/test-drm_mm.c nodes[1].start = 4096 - 512 - nodes[1].size; start 291 drivers/gpu/drm/selftests/test-drm_mm.c nodes[1].start, nodes[1].size); start 300 drivers/gpu/drm/selftests/test-drm_mm.c u64 start, u64 size) start 302 drivers/gpu/drm/selftests/test-drm_mm.c node->start = start; start 317 drivers/gpu/drm/selftests/test-drm_mm.c node->start, node->size); start 321 drivers/gpu/drm/selftests/test-drm_mm.c err, -ENOSPC, node->start, node->size); start 331 drivers/gpu/drm/selftests/test-drm_mm.c u64 start, size; start 360 drivers/gpu/drm/selftests/test-drm_mm.c boundaries[n].start, start 404 drivers/gpu/drm/selftests/test-drm_mm.c nodes[n].start = order[n] * size; start 410 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start); start 417 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start); start 441 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start); start 475 drivers/gpu/drm/selftests/test-drm_mm.c m, n, node->start); start 559 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start, tmp.size); start 620 drivers/gpu/drm/selftests/test-drm_mm.c if (tmp.start != nodes[n].start) { start 622 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start, size, start 623 drivers/gpu/drm/selftests/test-drm_mm.c nodes[n].start, nodes[n].size); start 639 drivers/gpu/drm/selftests/test-drm_mm.c u64 addr = nodes[n].start; start 648 drivers/gpu/drm/selftests/test-drm_mm.c if (nodes[n].start != addr) { start 650 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n, addr, nodes[n].start); start 805 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start, tmp.size, range_start, range_end); start 817 drivers/gpu/drm/selftests/test-drm_mm.c u64 start, start 823 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert_in_range_fail(mm, size, start, end)) start 826 drivers/gpu/drm/selftests/test-drm_mm.c n = div64_u64(start + size - 1, size); start 828 drivers/gpu/drm/selftests/test-drm_mm.c if (node->start < start || node->start + node->size > end) { start 830 drivers/gpu/drm/selftests/test-drm_mm.c n, node->start, node->start + node->size, start, end); start 834 drivers/gpu/drm/selftests/test-drm_mm.c if (node->start != n * size) { start 836 drivers/gpu/drm/selftests/test-drm_mm.c n, n * size, node->start); start 855 drivers/gpu/drm/selftests/test-drm_mm.c if (start > 0) { start 856 drivers/gpu/drm/selftests/test-drm_mm.c node = __drm_mm_interval_first(mm, 0, start - 1); start 859 
drivers/gpu/drm/selftests/test-drm_mm.c node->start, node->size, start); start 868 drivers/gpu/drm/selftests/test-drm_mm.c node->start, node->size, end); start 876 drivers/gpu/drm/selftests/test-drm_mm.c static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end) start 886 drivers/gpu/drm/selftests/test-drm_mm.c DRM_MM_BUG_ON(end <= start); start 900 drivers/gpu/drm/selftests/test-drm_mm.c start_n = div64_u64(start + size - 1, size); start 907 drivers/gpu/drm/selftests/test-drm_mm.c start, end, mode)) { start 911 drivers/gpu/drm/selftests/test-drm_mm.c start, end); start 916 drivers/gpu/drm/selftests/test-drm_mm.c if (!assert_contiguous_in_range(&mm, size, start, end)) { start 918 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, start, end, size); start 924 drivers/gpu/drm/selftests/test-drm_mm.c u64 addr = nodes[n].start; start 929 drivers/gpu/drm/selftests/test-drm_mm.c start, end, mode)) { start 934 drivers/gpu/drm/selftests/test-drm_mm.c if (nodes[n].start != addr) { start 936 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n, addr, nodes[n].start); start 941 drivers/gpu/drm/selftests/test-drm_mm.c if (!assert_contiguous_in_range(&mm, size, start, end)) { start 943 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, start, end, size); start 967 drivers/gpu/drm/selftests/test-drm_mm.c const unsigned int start = 1024; start 969 drivers/gpu/drm/selftests/test-drm_mm.c const unsigned int size = end - start; start 971 drivers/gpu/drm/selftests/test-drm_mm.c drm_mm_init(&mm, start, size); start 973 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert_in_range_fail(&mm, 1, 0, start)) start 977 drivers/gpu/drm/selftests/test-drm_mm.c start - size/2, start + (size+1)/2)) start 1162 drivers/gpu/drm/selftests/test-drm_mm.c hole->start, hole->size, hole->color); start 1167 drivers/gpu/drm/selftests/test-drm_mm.c next->start, next->size, next->color); start 1304 drivers/gpu/drm/selftests/test-drm_mm.c e->node.start); start 1322 drivers/gpu/drm/selftests/test-drm_mm.c e->node.start); start 1365 drivers/gpu/drm/selftests/test-drm_mm.c if (tmp.start < range_start || tmp.start + tmp.size > range_end) { start 1367 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start, tmp.size, range_start, range_end); start 1376 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start, drm_mm_hole_follows(&tmp)); start 1388 drivers/gpu/drm/selftests/test-drm_mm.c e->node.start); start 1610 drivers/gpu/drm/selftests/test-drm_mm.c return div64_u64(node->start, node->size); start 1656 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start, size); start 1688 drivers/gpu/drm/selftests/test-drm_mm.c m, n, node->start); start 1836 drivers/gpu/drm/selftests/test-drm_mm.c rsvd_lo.start = 1; start 1845 drivers/gpu/drm/selftests/test-drm_mm.c rsvd_hi.start = 5; start 1865 drivers/gpu/drm/selftests/test-drm_mm.c node.start); start 1900 drivers/gpu/drm/selftests/test-drm_mm.c u64 *start, start 1904 drivers/gpu/drm/selftests/test-drm_mm.c ++*start; start 1916 drivers/gpu/drm/selftests/test-drm_mm.c node->color, node->start, node->size, start 1918 drivers/gpu/drm/selftests/test-drm_mm.c list_next_entry(node, node_list)->start, start 1993 drivers/gpu/drm/selftests/test-drm_mm.c last = node->start + node->size; start 2004 drivers/gpu/drm/selftests/test-drm_mm.c node->start = last; start 2015 drivers/gpu/drm/selftests/test-drm_mm.c node->start += n + 1; start 2017 drivers/gpu/drm/selftests/test-drm_mm.c node->start += n + count - rem; start 2026 drivers/gpu/drm/selftests/test-drm_mm.c last = node->start + 
node->size; start 2059 drivers/gpu/drm/selftests/test-drm_mm.c div64_u64_rem(node->start, node->size, &rem); start 2062 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, node->start, node->size, rem); start 2119 drivers/gpu/drm/selftests/test-drm_mm.c if (tmp.start < range_start || tmp.start + tmp.size > range_end) { start 2121 drivers/gpu/drm/selftests/test-drm_mm.c tmp.start, tmp.size, range_start, range_end); start 2131 drivers/gpu/drm/selftests/test-drm_mm.c alignment, misalignment(&tmp, alignment), tmp.start); start 2143 drivers/gpu/drm/selftests/test-drm_mm.c e->node.start); start 118 drivers/gpu/drm/shmobile/shmob_drm_crtc.c static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start) start 124 drivers/gpu/drm/shmobile/shmob_drm_crtc.c if (start) start 132 drivers/gpu/drm/shmobile/shmob_drm_crtc.c if ((start && value) || (!start && !value)) start 138 drivers/gpu/drm/shmobile/shmob_drm_crtc.c if (!start) { start 116 drivers/gpu/drm/sis/sis_mm.c offset = item->mm_node.start; start 128 drivers/gpu/drm/sis/sis_mm.c offset = item->mm_node.start; start 207 drivers/gpu/drm/sti/sti_compositor.c compo->regs = devm_ioremap(dev, res->start, resource_size(res)); start 536 drivers/gpu/drm/sti/sti_dvo.c dvo->regs = devm_ioremap_nocache(dev, res->start, start 761 drivers/gpu/drm/sti/sti_hda.c hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 768 drivers/gpu/drm/sti/sti_hda.c hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start, start 893 drivers/gpu/drm/sti/sti_hdmi.c if (!hdmi->phy_ops->start(hdmi)) { start 1383 drivers/gpu/drm/sti/sti_hdmi.c hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 25 drivers/gpu/drm/sti/sti_hdmi.h bool (*start)(struct sti_hdmi *hdmi); start 211 drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c .start = sti_hdmi_tx3g4c28phy_start, start 1370 drivers/gpu/drm/sti/sti_hqvdp.c hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res)); start 863 drivers/gpu/drm/sti/sti_tvout.c tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 189 drivers/gpu/drm/sti/sti_vtg.c long clocksperline, start, stop; start 196 drivers/gpu/drm/sti/sti_vtg.c start = 0; start 199 drivers/gpu/drm/sti/sti_vtg.c start += delay; start 202 drivers/gpu/drm/sti/sti_vtg.c if (start < 0) start 203 drivers/gpu/drm/sti/sti_vtg.c start += clocksperline; start 204 drivers/gpu/drm/sti/sti_vtg.c else if (start >= clocksperline) start 205 drivers/gpu/drm/sti/sti_vtg.c start -= clocksperline; start 212 drivers/gpu/drm/sti/sti_vtg.c sync->hsync = (stop << 16) | start; start 396 drivers/gpu/drm/sti/sti_vtg.c vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 368 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100); start 369 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start; start 145 drivers/gpu/drm/tegra/drm.c dma_addr_t start, end; start 148 drivers/gpu/drm/tegra/drm.c start = tegra->domain->geometry.aperture_start & dma_mask; start 151 drivers/gpu/drm/tegra/drm.c gem_start = start; start 451 drivers/gpu/drm/tegra/dsi.c static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start, start 456 drivers/gpu/drm/tegra/dsi.c tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START); start 136 drivers/gpu/drm/tegra/gem.c bo->paddr = bo->mm->start; start 67 drivers/gpu/drm/tilcdc/tilcdc_crtc.c dma_addr_t start, end; start 72 drivers/gpu/drm/tilcdc/tilcdc_crtc.c start = 
gem->paddr + fb->offsets[0] + start 76 drivers/gpu/drm/tilcdc/tilcdc_crtc.c end = start + (crtc->mode.vdisplay * fb->pitches[0]); start 86 drivers/gpu/drm/tilcdc/tilcdc_crtc.c dma_base_and_ceiling = (u64)end << 32 | start; start 259 drivers/gpu/drm/tilcdc/tilcdc_drv.c priv->mmio = ioremap_nocache(res->start, resource_size(res)); start 456 drivers/gpu/drm/tiny/repaper.c u64 start = local_clock(); start 457 drivers/gpu/drm/tiny/repaper.c u64 end = start + (epd->factored_stage_time * 1000 * 1000); start 467 drivers/gpu/drm/tiny/repaper.c u64 start = local_clock(); start 468 drivers/gpu/drm/tiny/repaper.c u64 end = start + (epd->factored_stage_time * 1000 * 1000); start 119 drivers/gpu/drm/tiny/st7586.c int start, end, idx, ret = 0; start 138 drivers/gpu/drm/tiny/st7586.c start = rect->x1 / 3; start 142 drivers/gpu/drm/tiny/st7586.c (start >> 8) & 0xFF, start & 0xFF, start 150 drivers/gpu/drm/tiny/st7586.c (end - start) * (rect->y2 - rect->y1)); start 78 drivers/gpu/drm/ttm/ttm_agp_backend.c ret = agp_bind_memory(mem, node->start); start 403 drivers/gpu/drm/ttm/ttm_bo.c bo->offset = (bo->mem.start << PAGE_SHIFT) + start 764 drivers/gpu/drm/ttm/ttm_bo.c if (place->fpfn >= (bo->mem.start + bo->mem.size) || start 765 drivers/gpu/drm/ttm/ttm_bo.c (place->lpfn && place->lpfn <= bo->mem.start)) start 1219 drivers/gpu/drm/ttm/ttm_bo.c if (mem->mm_node && (mem->start < heap->fpfn || start 1220 drivers/gpu/drm/ttm/ttm_bo.c (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) start 86 drivers/gpu/drm/ttm/ttm_bo_manager.c mem->start = node->start; start 413 drivers/gpu/drm/ttm/ttm_bo_util.c (new_mem->start < old_mem->start + old_mem->size)) { start 162 drivers/gpu/drm/udl/udl_fb.c unsigned long start = vma->vm_start; start 185 drivers/gpu/drm/udl/udl_fb.c if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) start 188 drivers/gpu/drm/udl/udl_fb.c start += PAGE_SIZE; start 43 drivers/gpu/drm/udl/udl_transfer.c int start = width; start 48 drivers/gpu/drm/udl/udl_transfer.c start = j; start 60 drivers/gpu/drm/udl/udl_transfer.c identical = start + (width - end); start 61 drivers/gpu/drm/udl/udl_transfer.c *bfront = (u8 *) &front[start]; start 62 drivers/gpu/drm/udl/udl_transfer.c *width_bytes = (end - start) * sizeof(unsigned long); start 151 drivers/gpu/drm/udl/udl_transfer.c const u8 *const start = pixel; start 166 drivers/gpu/drm/udl/udl_transfer.c if (unlikely(pixel > start + bpp)) { start 168 drivers/gpu/drm/udl/udl_transfer.c *raw_pixels_count_byte = (((start - start 172 drivers/gpu/drm/udl/udl_transfer.c *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF; start 185 drivers/gpu/drm/v3d/v3d_bo.c args->offset = bo->node.start << PAGE_SHIFT; start 230 drivers/gpu/drm/v3d/v3d_bo.c args->offset = bo->node.start << PAGE_SHIFT; start 217 drivers/gpu/drm/v3d/v3d_drv.h u32 start, end; start 232 drivers/gpu/drm/v3d/v3d_drv.h u32 start, end; start 404 drivers/gpu/drm/v3d/v3d_gem.c ktime_t start = ktime_get(); start 418 drivers/gpu/drm/v3d/v3d_gem.c delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); start 547 drivers/gpu/drm/v3d/v3d_gem.c render->start = args->rcl_start; start 573 drivers/gpu/drm/v3d/v3d_gem.c bin->start = args->bcl_start; start 71 drivers/gpu/drm/v3d/v3d_irq.c V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); start 91 drivers/gpu/drm/v3d/v3d_mmu.c u32 page = bo->node.start; start 108 drivers/gpu/drm/v3d/v3d_mmu.c WARN_ON_ONCE(page - bo->node.start != start 121 drivers/gpu/drm/v3d/v3d_mmu.c for (page = bo->node.start; page < bo->node.start + npages; page++) 
start 121 drivers/gpu/drm/v3d/v3d_sched.c job->start, job->end); start 135 drivers/gpu/drm/v3d/v3d_sched.c V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start); start 170 drivers/gpu/drm/v3d/v3d_sched.c job->start, job->end); start 177 drivers/gpu/drm/v3d/v3d_sched.c V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start); start 536 drivers/gpu/drm/vc4/vc4_crtc.c vc4_state->mm.start); start 541 drivers/gpu/drm/vc4/vc4_crtc.c vc4_state->mm.start); start 693 drivers/gpu/drm/vc4/vc4_crtc.c u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start; start 792 drivers/gpu/drm/vc4/vc4_crtc.c (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) || start 369 drivers/gpu/drm/vc4/vc4_gem.c submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end) start 376 drivers/gpu/drm/vc4/vc4_gem.c V3D_WRITE(V3D_CTNCA(thread), start); start 1066 drivers/gpu/drm/vc4/vc4_gem.c unsigned long start = jiffies; start 1070 drivers/gpu/drm/vc4/vc4_gem.c uint64_t delta = jiffies_to_nsecs(jiffies - start); start 143 drivers/gpu/drm/vc4/vc4_hvs.c dst_kernel = hvs->dlist + space->start; start 575 drivers/gpu/drm/vc4/vc4_plane.c vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start; start 893 drivers/gpu/drm/vc4/vc4_plane.c u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start, start 166 drivers/gpu/drm/via/via_mm.c ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT); start 302 drivers/gpu/drm/via/via_verifier.c unsigned start = start 312 drivers/gpu/drm/via/via_verifier.c if (start > 9) start 313 drivers/gpu/drm/via/via_verifier.c start = 9; start 316 drivers/gpu/drm/via/via_verifier.c &(cur_seq->t_addr[tex = cur_seq->texture][start]); start 317 drivers/gpu/drm/via/via_verifier.c pitch = &(cur_seq->pitch[tex][start]); start 318 drivers/gpu/drm/via/via_verifier.c height = &(cur_seq->height[tex][start]); start 320 drivers/gpu/drm/via/via_verifier.c for (i = start; i <= end; ++i) { start 611 drivers/gpu/drm/virtio/virtgpu_vq.c size_t start = block * EDID_LENGTH; start 613 drivers/gpu/drm/virtio/virtgpu_vq.c if (start + len > le32_to_cpu(resp->size)) start 615 drivers/gpu/drm/virtio/virtgpu_vq.c memcpy(buf, resp->edid + start, len); start 247 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c bo->mem.start < bo->num_pages && start 248 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c bo->mem.start > 0 && start 322 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c ptr->gmrId = bo->mem.start; start 886 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c offset = header->node.start << PAGE_SHIFT; start 890 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; start 353 drivers/gpu/drm/vmwgfx/vmwgfx_context.c cmd->body.mobid = bo->mem.start; start 520 drivers/gpu/drm/vmwgfx/vmwgfx_context.c cmd->body.mobid = bo->mem.start; start 186 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c cmd->body.mobid = bo->mem.start; start 730 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c cmd->body.mobid = dx_query_mob->base.mem.start; start 3312 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c reloc->location->gmrId = bo->mem.start; start 3315 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c *reloc->mob_loc = bo->mem.start; start 325 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c unsigned long start, end, min, max; start 333 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c start = page->index << PAGE_SHIFT; start 334 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c end = start + PAGE_SIZE - 1; start 335 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c min = min(min, start); start 617 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c cmd->body.guestResult.gmrId = bo->mem.start; start 659 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c cmd->body.mobid 
= bo->mem.start; start 71 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c mem->start = id; start 91 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c ida_free(&gman->gmr_ida, mem->start); start 266 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c cmd->body.mobid = bo->mem.start; start 405 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c cmd->body.mobid = res->backup->base.mem.start; start 1165 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c cmd1->body.mobid = bo->mem.start; start 589 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c vmw_be->gmr_id = bo_mem->start; start 815 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c mem->bus.offset = mem->start << PAGE_SHIFT; start 279 drivers/gpu/host1x/dev.c dma_addr_t start, end; start 307 drivers/gpu/host1x/dev.c start = geometry->aperture_start & mask; start 311 drivers/gpu/host1x/dev.c init_iova_domain(&host->iova, 1UL << order, start >> order); start 37 drivers/gpu/host1x/dev.h void (*start)(struct host1x_cdma *cdma); start 245 drivers/gpu/host1x/dev.h host->cdma_op->start(cdma); start 46 drivers/gpu/host1x/hw/cdma_hw.c u64 start, end; start 52 drivers/gpu/host1x/hw/cdma_hw.c start = cdma->push_buffer.dma; start 59 drivers/gpu/host1x/hw/cdma_hw.c host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART); start 61 drivers/gpu/host1x/hw/cdma_hw.c host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI); start 93 drivers/gpu/host1x/hw/cdma_hw.c u64 start, end; start 103 drivers/gpu/host1x/hw/cdma_hw.c start = cdma->push_buffer.dma; start 107 drivers/gpu/host1x/hw/cdma_hw.c host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART); start 109 drivers/gpu/host1x/hw/cdma_hw.c host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI); start 318 drivers/gpu/host1x/hw/cdma_hw.c .start = cdma_start, start 71 drivers/gpu/host1x/hw/debug_hw_1x01.c u32 val, rd_ptr, wr_ptr, start, end; start 93 drivers/gpu/host1x/hw/debug_hw_1x01.c start = HOST1X_SYNC_CF_SETUP_BASE_V(val); start 114 drivers/gpu/host1x/hw/debug_hw_1x01.c rd_ptr = start; start 57 drivers/gpu/host1x/hw/debug_hw_1x06.c u32 rd_ptr, wr_ptr, start, end; start 89 drivers/gpu/host1x/hw/debug_hw_1x06.c start = HOST1X_HV_CMDFIFO_SETUP_BASE_V(val); start 105 drivers/gpu/host1x/hw/debug_hw_1x06.c rd_ptr - start, val); start 114 drivers/gpu/host1x/hw/debug_hw_1x06.c rd_ptr = start; start 1396 drivers/gpu/ipu-v3/ipu-common.c ipu_base = res->start; start 157 drivers/gpu/ipu-v3/ipu-di.c int start, int count) start 164 drivers/gpu/ipu-v3/ipu-di.c int wave_gen = start + i + 1; start 1463 drivers/gpu/vga/vgaarb.c resource_size_t start, end; start 1490 drivers/gpu/vga/vgaarb.c start = pci_resource_start(vgadev->pdev, i); start 1493 drivers/gpu/vga/vgaarb.c if (!start || !end) start 1496 drivers/gpu/vga/vgaarb.c if (base < start || limit >= end) start 715 drivers/hid/hid-core.c static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) start 719 drivers/hid/hid-core.c if ((end - start) <= 0) start 722 drivers/hid/hid-core.c b = *start++; start 731 drivers/hid/hid-core.c if ((end - start) < 2) start 734 drivers/hid/hid-core.c item->size = *start++; start 735 drivers/hid/hid-core.c item->tag = *start++; start 737 drivers/hid/hid-core.c if ((end - start) < item->size) start 740 drivers/hid/hid-core.c item->data.longdata = start; start 741 drivers/hid/hid-core.c start += item->size; start 742 drivers/hid/hid-core.c return start; start 750 drivers/hid/hid-core.c return start; start 753 drivers/hid/hid-core.c if ((end - start) < 1) start 755 drivers/hid/hid-core.c item->data.u8 = *start++; start 756 
drivers/hid/hid-core.c return start; start 759 drivers/hid/hid-core.c if ((end - start) < 2) start 761 drivers/hid/hid-core.c item->data.u16 = get_unaligned_le16(start); start 762 drivers/hid/hid-core.c start = (__u8 *)((__le16 *)start + 1); start 763 drivers/hid/hid-core.c return start; start 767 drivers/hid/hid-core.c if ((end - start) < 4) start 769 drivers/hid/hid-core.c item->data.u32 = get_unaligned_le32(start); start 770 drivers/hid/hid-core.c start = (__u8 *)((__le32 *)start + 1); start 771 drivers/hid/hid-core.c return start; start 864 drivers/hid/hid-core.c __u8 *start = hid->dev_rdesc; start 865 drivers/hid/hid-core.c __u8 *end = start + hid->dev_rsize; start 886 drivers/hid/hid-core.c while ((start = fetch_item(start, end, &item)) != NULL) start 930 drivers/hid/hid-core.c int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) start 932 drivers/hid/hid-core.c hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); start 1188 drivers/hid/hid-core.c __u8 *start; start 1204 drivers/hid/hid-core.c start = device->dev_rdesc; start 1205 drivers/hid/hid-core.c if (WARN_ON(!start)) start 1209 drivers/hid/hid-core.c buf = kmemdup(start, size, GFP_KERNEL); start 1214 drivers/hid/hid-core.c start = device->driver->report_fixup(device, buf, &size); start 1216 drivers/hid/hid-core.c start = buf; start 1218 drivers/hid/hid-core.c start = kmemdup(start, size, GFP_KERNEL); start 1220 drivers/hid/hid-core.c if (start == NULL) start 1223 drivers/hid/hid-core.c device->rdesc = start; start 1234 drivers/hid/hid-core.c end = start + size; start 1245 drivers/hid/hid-core.c while ((next = fetch_item(start, end, &item)) != NULL) { start 1246 drivers/hid/hid-core.c start = next; start 1260 drivers/hid/hid-core.c if (start == end) { start 1285 drivers/hid/hid-core.c size - (unsigned int)(end - start), size); start 2030 drivers/hid/hid-core.c error = hdev->ll_driver->start(hdev); start 435 drivers/hid/hid-hyperv.c .start = mousevsc_hid_start, start 1395 drivers/hid/hid-logitech-dj.c .start = logi_dj_ll_start, start 683 drivers/hid/hid-rmi.c goto start; start 689 drivers/hid/hid-rmi.c goto start; start 698 drivers/hid/hid-rmi.c goto start; start 733 drivers/hid/hid-rmi.c start: start 668 drivers/hid/hid-steam.c .start = steam_client_ll_start, start 806 drivers/hid/i2c-hid/i2c-hid-core.c .start = i2c_hid_start, start 188 drivers/hid/intel-ish-hid/ishtp-hid.c .start = ishtp_hid_start, start 119 drivers/hid/uhid.c ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS; start 121 drivers/hid/uhid.c ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS; start 123 drivers/hid/uhid.c ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS; start 371 drivers/hid/uhid.c .start = uhid_hid_start, start 1307 drivers/hid/usbhid/hid-core.c .start = usbhid_start, start 341 drivers/hsi/controllers/omap_ssi_core.c *phy = mem->start; start 1105 drivers/hsi/controllers/omap_ssi_port.c ioarea = devm_request_mem_region(&port->device, mem->start, start 1112 drivers/hsi/controllers/omap_ssi_port.c base = devm_ioremap(&port->device, mem->start, resource_size(mem)); start 1120 drivers/hsi/controllers/omap_ssi_port.c *phy = mem->start; start 701 drivers/hv/hv_balloon.c static void hv_mem_hot_add(unsigned long start, unsigned long size, start 713 drivers/hv/hv_balloon.c start_pfn = start + (i * HA_CHUNK); start 1898 drivers/hv/vmbus_drv.c resource_size_t start = 0; start 1912 drivers/hv/vmbus_drv.c start = res->data.address32.address.minimum; start 1917 drivers/hv/vmbus_drv.c start = 
res->data.address64.address.minimum; start 1938 drivers/hv/vmbus_drv.c if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS) start 1943 drivers/hv/vmbus_drv.c new_res->start = start; start 1955 drivers/hv/vmbus_drv.c if (((*old_res)->end + 1) == new_res->start) { start 1961 drivers/hv/vmbus_drv.c if ((*old_res)->start == new_res->end + 1) { start 1962 drivers/hv/vmbus_drv.c (*old_res)->start = new_res->start; start 1967 drivers/hv/vmbus_drv.c if ((*old_res)->start > new_res->end) { start 1990 drivers/hv/vmbus_drv.c __release_region(hyperv_mmio, fb_mmio->start, start 2057 drivers/hv/vmbus_drv.c resource_size_t range_min, range_max, start; start 2070 drivers/hv/vmbus_drv.c !(max < fb_mmio->start)) { start 2072 drivers/hv/vmbus_drv.c range_min = fb_mmio->start; start 2074 drivers/hv/vmbus_drv.c start = (range_min + align - 1) & ~(align - 1); start 2075 drivers/hv/vmbus_drv.c for (; start + size - 1 <= range_max; start += align) { start 2076 drivers/hv/vmbus_drv.c *new = request_mem_region_exclusive(start, size, dev_n); start 2085 drivers/hv/vmbus_drv.c if ((iter->start >= max) || (iter->end <= min)) start 2088 drivers/hv/vmbus_drv.c range_min = iter->start; start 2090 drivers/hv/vmbus_drv.c start = (range_min + align - 1) & ~(align - 1); start 2091 drivers/hv/vmbus_drv.c for (; start + size - 1 <= range_max; start += align) { start 2092 drivers/hv/vmbus_drv.c shadow = __request_region(iter, start, size, NULL, start 2097 drivers/hv/vmbus_drv.c *new = request_mem_region_exclusive(start, size, dev_n); start 2104 drivers/hv/vmbus_drv.c __release_region(iter, start, size); start 2122 drivers/hv/vmbus_drv.c void vmbus_free_mmio(resource_size_t start, resource_size_t size) start 2128 drivers/hv/vmbus_drv.c if ((iter->start >= start + size) || (iter->end <= start)) start 2131 drivers/hv/vmbus_drv.c __release_region(iter, start, size); start 2133 drivers/hv/vmbus_drv.c release_mem_region(start, size); start 1276 drivers/hwmon/abituguru.c data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; start 1600 drivers/hwmon/abituguru.c res.start = address; start 973 drivers/hwmon/abituguru3.c data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; start 1271 drivers/hwmon/abituguru3.c res.start = ABIT_UGURU3_BASE; start 835 drivers/hwmon/asus_atk0110.c int start; start 858 drivers/hwmon/asus_atk0110.c start = 0; start 865 drivers/hwmon/asus_atk0110.c start = 1; start 872 drivers/hwmon/asus_atk0110.c start = 1; start 909 drivers/hwmon/asus_atk0110.c "%s%d_input", base_name, start + *num); start 915 drivers/hwmon/asus_atk0110.c "%s%d_label", base_name, start + *num); start 921 drivers/hwmon/asus_atk0110.c "%s%d_%s", base_name, start + *num, limit1_name); start 927 drivers/hwmon/asus_atk0110.c "%s%d_%s", base_name, start + *num, limit2_name); start 2587 drivers/hwmon/dme1737.c .start = addr, start 2635 drivers/hwmon/dme1737.c if (!devm_request_region(dev, res->start, DME1737_EXTENT, "dme1737")) { start 2637 drivers/hwmon/dme1737.c (unsigned short)res->start, start 2638 drivers/hwmon/dme1737.c (unsigned short)res->start + DME1737_EXTENT - 1); start 2646 drivers/hwmon/dme1737.c data->addr = res->start; start 1387 drivers/hwmon/f71805f.c if (!devm_request_region(&pdev->dev, res->start + ADDR_REG_OFFSET, 2, start 1390 drivers/hwmon/f71805f.c (unsigned long)(res->start + ADDR_REG_OFFSET), start 1391 drivers/hwmon/f71805f.c (unsigned long)(res->start + ADDR_REG_OFFSET + 1)); start 1394 drivers/hwmon/f71805f.c data->addr = res->start; start 1509 drivers/hwmon/f71805f.c .start = address, start 
2344 drivers/hwmon/f71882fg.c data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; start 2716 drivers/hwmon/f71882fg.c .start = address, start 299 drivers/hwmon/ftsteutates.c .start = fts_wd_start, start 3044 drivers/hwmon/it87.c if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT, start 3047 drivers/hwmon/it87.c (unsigned long)res->start, start 3048 drivers/hwmon/it87.c (unsigned long)(res->start + IT87_EC_EXTENT - 1)); start 3056 drivers/hwmon/it87.c data->addr = res->start; start 3256 drivers/hwmon/it87.c .start = address + IT87_EC_OFFSET, start 794 drivers/hwmon/lm78.c if (!devm_request_region(dev, res->start + LM78_ADDR_REG_OFFSET, start 803 drivers/hwmon/lm78.c data->isa_addr = res->start; start 920 drivers/hwmon/lm78.c .start = address, start 1200 drivers/hwmon/nct6683.c if (!devm_request_region(dev, res->start, IOREGION_LENGTH, DRVNAME)) start 1209 drivers/hwmon/nct6683.c data->addr = res->start; start 1438 drivers/hwmon/nct6683.c res.start = address + IOREGION_OFFSET; start 3809 drivers/hwmon/nct6775.c if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH, start 3820 drivers/hwmon/nct6775.c data->addr = res->start; start 4864 drivers/hwmon/nct6775.c res.start = address + IOREGION_OFFSET; start 478 drivers/hwmon/ntc_thermistor.c int start, end, mid; start 499 drivers/hwmon/ntc_thermistor.c start = 0; start 501 drivers/hwmon/ntc_thermistor.c while (start < end) { start 502 drivers/hwmon/ntc_thermistor.c mid = start + (end - start) / 2; start 515 drivers/hwmon/ntc_thermistor.c start = mid + 1; start 522 drivers/hwmon/ntc_thermistor.c if (ohm >= data->comp[start].ohm) start 523 drivers/hwmon/ntc_thermistor.c end = start; start 114 drivers/hwmon/occ/p8_i2c.c unsigned long start; start 122 drivers/hwmon/occ/p8_i2c.c start = jiffies; start 154 drivers/hwmon/occ/p8_i2c.c if (time_after(jiffies, start + timeout)) start 1701 drivers/hwmon/pc87360.c res[res_count].start = extra_isa[i]; start 947 drivers/hwmon/pc87427.c if (!devm_request_region(&pdev->dev, res->start, start 951 drivers/hwmon/pc87427.c (unsigned long)res->start, start 1149 drivers/hwmon/pc87427.c res[res_count].start = sio_data->address[i]; start 1160 drivers/hwmon/pc87427.c pdev = platform_device_alloc(DRVNAME, res[0].start); start 465 drivers/hwmon/sch5627.c data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; start 406 drivers/hwmon/sch5636.c data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; start 375 drivers/hwmon/sch56xx-common.c .start = watchdog_start, start 517 drivers/hwmon/sch56xx-common.c .start = address, start 582 drivers/hwmon/sis5595.c if (!devm_request_region(&pdev->dev, res->start, SIS5595_EXTENT, start 593 drivers/hwmon/sis5595.c data->addr = res->start; start 765 drivers/hwmon/sis5595.c .start = address, start 227 drivers/hwmon/smsc47b397.c if (!devm_request_region(dev, res->start, SMSC_EXTENT, start 230 drivers/hwmon/smsc47b397.c (unsigned long)res->start, start 231 drivers/hwmon/smsc47b397.c (unsigned long)res->start + SMSC_EXTENT - 1); start 239 drivers/hwmon/smsc47b397.c data->addr = res->start; start 252 drivers/hwmon/smsc47b397.c .start = address, start 671 drivers/hwmon/smsc47m1.c unsigned short start = address + ports[i]; start 677 drivers/hwmon/smsc47m1.c err = acpi_check_region(start, len, DRVNAME); start 683 drivers/hwmon/smsc47m1.c if (!devm_request_region(dev, start, len, DRVNAME)) { start 686 drivers/hwmon/smsc47m1.c start, start + len); start 722 drivers/hwmon/smsc47m1.c err = smsc47m1_handle_resources(res->start, 
sio_data->type, start 731 drivers/hwmon/smsc47m1.c data->addr = res->start; start 864 drivers/hwmon/smsc47m1.c .start = address, start 674 drivers/hwmon/via686a.c if (!devm_request_region(&pdev->dev, res->start, VIA686A_EXTENT, start 677 drivers/hwmon/via686a.c (unsigned long)res->start, (unsigned long)res->end); start 687 drivers/hwmon/via686a.c data->addr = res->start; start 820 drivers/hwmon/via686a.c .start = address, start 1155 drivers/hwmon/vt1211.c if (!devm_request_region(dev, res->start, resource_size(res), start 1158 drivers/hwmon/vt1211.c (unsigned long)res->start, (unsigned long)res->end); start 1161 drivers/hwmon/vt1211.c data->addr = res->start; start 1238 drivers/hwmon/vt1211.c .start = address, start 786 drivers/hwmon/vt8231.c if (!devm_request_region(&pdev->dev, res->start, VT8231_EXTENT, start 789 drivers/hwmon/vt8231.c (unsigned long)res->start, (unsigned long)res->end); start 798 drivers/hwmon/vt8231.c data->addr = res->start; start 943 drivers/hwmon/vt8231.c .start = address, start 2048 drivers/hwmon/w83627ehf.c if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) { start 2051 drivers/hwmon/w83627ehf.c (unsigned long)res->start, start 2052 drivers/hwmon/w83627ehf.c (unsigned long)res->start + IOREGION_LENGTH - 1); start 2063 drivers/hwmon/w83627ehf.c data->addr = res->start; start 2582 drivers/hwmon/w83627ehf.c release_region(res->start, IOREGION_LENGTH); start 2838 drivers/hwmon/w83627ehf.c res.start = address + IOREGION_OFFSET; start 1410 drivers/hwmon/w83627hf.c if (!devm_request_region(dev, res->start, WINB_REGION_SIZE, DRVNAME)) { start 1412 drivers/hwmon/w83627hf.c (unsigned long)res->start, start 1413 drivers/hwmon/w83627hf.c (unsigned long)(res->start + WINB_REGION_SIZE - 1)); start 1421 drivers/hwmon/w83627hf.c data->addr = res->start; start 1920 drivers/hwmon/w83627hf.c .start = address + WINB_REGION_OFFSET, start 1759 drivers/hwmon/w83781d.c res->start + W83781D_ADDR_REG_OFFSET, 2, start 1769 drivers/hwmon/w83781d.c data->isa_addr = res->start; start 1932 drivers/hwmon/w83781d.c .start = address, start 92 drivers/hwspinlock/omap_hwspinlock.c io_base = ioremap(res->start, resource_size(res)); start 103 drivers/hwspinlock/u8500_hsem.c io_base = ioremap(res->start, resource_size(res)); start 452 drivers/hwtracing/coresight/coresight-etm-perf.c unsigned long start, stop; start 460 drivers/hwtracing/coresight/coresight-etm-perf.c start = fr[i].start; start 461 drivers/hwtracing/coresight/coresight-etm-perf.c stop = start + fr[i].size; start 466 drivers/hwtracing/coresight/coresight-etm-perf.c etm_filter->start_addr = start; start 471 drivers/hwtracing/coresight/coresight-etm-perf.c etm_filter->start_addr = start; start 595 drivers/hwtracing/coresight/coresight-etm-perf.c etm_pmu.start = etm_event_start; start 796 drivers/hwtracing/coresight/coresight-etm4x.c u64 start, u64 stop, int comparator) start 801 drivers/hwtracing/coresight/coresight-etm4x.c config->addr_val[comparator] = start; start 851 drivers/hwtracing/coresight/coresight-etm4x.c u64 start, stop; start 857 drivers/hwtracing/coresight/coresight-etm4x.c start = 0x0; start 860 drivers/hwtracing/coresight/coresight-etm4x.c etm4_set_comparator_filter(config, start, stop, start 890 drivers/hwtracing/coresight/coresight-stm.c drvdata->chs.phys = ch_res.start; start 351 drivers/hwtracing/coresight/coresight-tmc-etr.c int i, index, start; start 356 drivers/hwtracing/coresight/coresight-tmc-etr.c start = offset >> PAGE_SHIFT; start 357 drivers/hwtracing/coresight/coresight-tmc-etr.c for (i = start; i < 
(start + npages); i++) { start 434 drivers/hwtracing/intel_th/core.c .start = REG_GTH_OFFSET, start 447 drivers/hwtracing/intel_th/core.c .start = REG_MSU_OFFSET, start 452 drivers/hwtracing/intel_th/core.c .start = BUF_MSU_OFFSET, start 468 drivers/hwtracing/intel_th/core.c .start = REG_MSU_OFFSET, start 473 drivers/hwtracing/intel_th/core.c .start = BUF_MSU_OFFSET, start 489 drivers/hwtracing/intel_th/core.c .start = REG_STH_OFFSET, start 494 drivers/hwtracing/intel_th/core.c .start = TH_MMIO_SW, start 507 drivers/hwtracing/intel_th/core.c .start = REG_STH_OFFSET, start 512 drivers/hwtracing/intel_th/core.c .start = TH_MMIO_RTIT, start 525 drivers/hwtracing/intel_th/core.c .start = REG_PTI_OFFSET, start 540 drivers/hwtracing/intel_th/core.c .start = REG_PTI_OFFSET, start 555 drivers/hwtracing/intel_th/core.c .start = REG_DCIH_OFFSET, start 627 drivers/hwtracing/intel_th/core.c bar = res[r].start; start 631 drivers/hwtracing/intel_th/core.c res[r].start = 0; start 636 drivers/hwtracing/intel_th/core.c res[r].start += devres[bar].start; start 637 drivers/hwtracing/intel_th/core.c res[r].end += devres[bar].start; start 647 drivers/hwtracing/intel_th/core.c res[r].start = th->irq; start 879 drivers/hwtracing/intel_th/core.c err = devm_request_irq(dev, devres[r].start, start 886 drivers/hwtracing/intel_th/core.c th->irq = devres[r].start; start 742 drivers/hwtracing/intel_th/gth.c base = devm_ioremap(dev, res->start, resource_size(res)); start 1462 drivers/hwtracing/intel_th/msu.c unsigned long start = off, tocopy = 0; start 1465 drivers/hwtracing/intel_th/msu.c start += msc->single_sz; start 1466 drivers/hwtracing/intel_th/msu.c if (start < size) { start 1467 drivers/hwtracing/intel_th/msu.c tocopy = min(rem, size - start); start 1468 drivers/hwtracing/intel_th/msu.c if (copy_to_user(buf, msc->base + start, tocopy)) start 1473 drivers/hwtracing/intel_th/msu.c start += tocopy; start 1476 drivers/hwtracing/intel_th/msu.c start &= size - 1; start 1478 drivers/hwtracing/intel_th/msu.c tocopy = min(rem, msc->single_sz - start); start 1479 drivers/hwtracing/intel_th/msu.c if (copy_to_user(buf, msc->base + start, tocopy)) start 1488 drivers/hwtracing/intel_th/msu.c if (copy_to_user(buf, msc->base + start, rem)) start 2074 drivers/hwtracing/intel_th/msu.c base = devm_ioremap(dev, res->start, resource_size(res)); start 90 drivers/hwtracing/intel_th/pci.c if (pdev->resource[TH_PCI_RTIT_BAR].start) { start 99 drivers/hwtracing/intel_th/pci.c resource[r].start = pci_irq_vector(pdev, i); start 212 drivers/hwtracing/intel_th/pti.c base = devm_ioremap(dev, res->start, resource_size(res)); start 200 drivers/hwtracing/intel_th/sth.c base = devm_ioremap(dev, res->start, resource_size(res)); start 208 drivers/hwtracing/intel_th/sth.c channels = devm_ioremap(dev, res->start, resource_size(res)); start 219 drivers/hwtracing/intel_th/sth.c sth->channels_phys = res->start; start 221 drivers/hwtracing/stm/core.c static int find_free_channels(unsigned long *bitmap, unsigned int start, start 227 drivers/hwtracing/stm/core.c for (pos = start; pos < end + 1; pos = ALIGN(pos, width)) { start 501 drivers/hwtracing/stm/policy.c char *start, *end = s; start 508 drivers/hwtracing/stm/policy.c start = strsep(&end, "/"); start 509 drivers/hwtracing/stm/policy.c if (!start) start 512 drivers/hwtracing/stm/policy.c if (!*start) start 518 drivers/hwtracing/stm/policy.c if (!strcmp(start, start 77 drivers/i2c/algos/i2c-algo-bit.c unsigned long start; start 85 drivers/i2c/algos/i2c-algo-bit.c start = jiffies; start 92 
drivers/i2c/algos/i2c-algo-bit.c if (time_after(jiffies, start + adap->timeout)) { start 103 drivers/i2c/algos/i2c-algo-bit.c if (jiffies != start && i2c_debug >= 3) start 105 drivers/i2c/algos/i2c-algo-bit.c jiffies - start); start 212 drivers/i2c/busses/i2c-at91-core.c phy_addr = mem->start; start 462 drivers/i2c/busses/i2c-bcm2835.c i2c_dev->irq = irq->start; start 920 drivers/i2c/busses/i2c-cadence.c "Cadence I2C at %08lx", (unsigned long)r_mem->start); start 977 drivers/i2c/busses/i2c-cadence.c id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); start 478 drivers/i2c/busses/i2c-fsi.c unsigned long start; start 509 drivers/i2c/busses/i2c-fsi.c start = jiffies; start 520 drivers/i2c/busses/i2c-fsi.c } while (time_after(start + I2C_ABORT_TIMEOUT, jiffies)); start 579 drivers/i2c/busses/i2c-fsi.c unsigned long start = jiffies; start 601 drivers/i2c/busses/i2c-fsi.c } while (time_after(start + timeout, jiffies)); start 372 drivers/i2c/busses/i2c-highlander.c dev->base = ioremap_nocache(res->start, resource_size(res)); start 1544 drivers/i2c/busses/i2c-i801.c res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; start 1546 drivers/i2c/busses/i2c-i801.c res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; start 1548 drivers/i2c/busses/i2c-i801.c res->end = res->start + 3; start 1594 drivers/i2c/busses/i2c-i801.c res->start = tco_base & ~1; start 1595 drivers/i2c/busses/i2c-i801.c res->end = res->start + 32 - 1; start 145 drivers/i2c/busses/i2c-icy.c z->resource.start, start 150 drivers/i2c/busses/i2c-icy.c i2c->reg_s0 = ZTWO_VADDR(z->resource.start); start 151 drivers/i2c/busses/i2c-icy.c i2c->reg_s1 = ZTWO_VADDR(z->resource.start + 2); start 166 drivers/i2c/busses/i2c-icy.c &z->resource.start); start 1079 drivers/i2c/busses/i2c-imx.c phy_addr = (dma_addr_t)res->start; start 406 drivers/i2c/busses/i2c-iop3xx.c release_mem_region(res->start, IOP3XX_I2C_IO_SIZE); start 454 drivers/i2c/busses/i2c-iop3xx.c if (!request_mem_region(res->start, IOP3XX_I2C_IO_SIZE, pdev->name)) { start 462 drivers/i2c/busses/i2c-iop3xx.c adapter_data->ioaddr = ioremap(res->start, IOP3XX_I2C_IO_SIZE); start 511 drivers/i2c/busses/i2c-iop3xx.c release_mem_region(res->start, IOP3XX_I2C_IO_SIZE); start 265 drivers/i2c/busses/i2c-isch.c if (!devm_request_region(&dev->dev, res->start, resource_size(res), start 272 drivers/i2c/busses/i2c-isch.c sch_smba = res->start; start 847 drivers/i2c/busses/i2c-ismt.c unsigned long start, len; start 876 drivers/i2c/busses/i2c-ismt.c start = pci_resource_start(pdev, SMBBAR); start 878 drivers/i2c/busses/i2c-ismt.c if (!start || !len) { start 885 drivers/i2c/busses/i2c-ismt.c "SMBus iSMT adapter at %lx", start); start 887 drivers/i2c/busses/i2c-ismt.c dev_dbg(&priv->pci_dev->dev, " start=0x%lX\n", start); start 900 drivers/i2c/busses/i2c-ismt.c start, start + len); start 739 drivers/i2c/busses/i2c-mpc.c "MPC adapter at 0x%llx", (unsigned long long)res.start); start 372 drivers/i2c/busses/i2c-mxs.c uint32_t start; start 456 drivers/i2c/busses/i2c-mxs.c start = MXS_I2C_CTRL0_PRE_SEND_START; start 460 drivers/i2c/busses/i2c-mxs.c start |= MXS_I2C_CTRL0_RETAIN_CLOCK; start 471 drivers/i2c/busses/i2c-mxs.c start |= flags; start 473 drivers/i2c/busses/i2c-mxs.c start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK; start 505 drivers/i2c/busses/i2c-mxs.c start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "", start 506 drivers/i2c/busses/i2c-mxs.c start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "", start 507 drivers/i2c/busses/i2c-mxs.c start & MXS_I2C_CTRL0_RETAIN_CLOCK ? 
"C" : ""); start 513 drivers/i2c/busses/i2c-mxs.c start | MXS_I2C_CTRL0_MASTER_MODE | start 518 drivers/i2c/busses/i2c-mxs.c start &= ~MXS_I2C_CTRL0_PRE_SEND_START; start 997 drivers/i2c/busses/i2c-nomadik.c dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, start 1070 drivers/i2c/busses/i2c-nomadik.c release_mem_region(res->start, resource_size(res)); start 630 drivers/i2c/busses/i2c-ocores.c i2c->iobase = res->start; start 631 drivers/i2c/busses/i2c-ocores.c if (!devm_request_region(&pdev->dev, res->start, start 160 drivers/i2c/busses/i2c-pca-platform.c i2c->io_base = res->start; start 168 drivers/i2c/busses/i2c-pca-platform.c (unsigned long) res->start); start 274 drivers/i2c/busses/i2c-pmcmsp.c if (!request_mem_region(res->start, resource_size(res), start 278 drivers/i2c/busses/i2c-pmcmsp.c res->start); start 284 drivers/i2c/busses/i2c-pmcmsp.c pmcmsptwi_data.iobase = ioremap_nocache(res->start, start 288 drivers/i2c/busses/i2c-pmcmsp.c "Unable to ioremap address 0x%08x\n", res->start); start 347 drivers/i2c/busses/i2c-pmcmsp.c release_mem_region(res->start, resource_size(res)); start 371 drivers/i2c/busses/i2c-pmcmsp.c release_mem_region(res->start, resource_size(res)); start 738 drivers/i2c/busses/i2c-pnx.c alg_data->adapter.name, res->start, alg_data->irq); start 193 drivers/i2c/busses/i2c-puv3.c if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c")) start 203 drivers/i2c/busses/i2c-puv3.c mem->start); start 221 drivers/i2c/busses/i2c-puv3.c release_mem_region(mem->start, resource_size(mem)); start 236 drivers/i2c/busses/i2c-puv3.c release_mem_region(mem->start, resource_size(mem)); start 37 drivers/i2c/busses/i2c-pxa-pci.c res[0].start = pci_resource_start(dev, bar); start 41 drivers/i2c/busses/i2c-pxa-pci.c res[1].start = dev->irq; start 52 drivers/i2c/busses/i2c-pxa-pci.c if (r.start != res[0].start) start 1264 drivers/i2c/busses/i2c-pxa.c i2c->iobase = res->start; start 218 drivers/i2c/busses/i2c-qup.c u8 *start; start 669 drivers/i2c/busses/i2c-qup.c tags = &qup->start_tag.start[qup->tag_buf_pos + len]; start 675 drivers/i2c/busses/i2c-qup.c &qup->brx.tag.start[0], start 692 drivers/i2c/busses/i2c-qup.c &qup->start_tag.start[qup->tag_buf_pos], start 701 drivers/i2c/busses/i2c-qup.c tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len]; start 738 drivers/i2c/busses/i2c-qup.c qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT; start 743 drivers/i2c/busses/i2c-qup.c &qup->brx.tag.start[0], start 749 drivers/i2c/busses/i2c-qup.c qup->btx.tag.start[len - 1] = QUP_BAM_FLUSH_STOP; start 750 drivers/i2c/busses/i2c-qup.c ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], &qup->btx.tag.start[0], start 1736 drivers/i2c/busses/i2c-qup.c qup->start_tag.start = devm_kzalloc(&pdev->dev, start 1738 drivers/i2c/busses/i2c-qup.c if (!qup->start_tag.start) { start 1743 drivers/i2c/busses/i2c-qup.c qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); start 1744 drivers/i2c/busses/i2c-qup.c if (!qup->brx.tag.start) { start 1749 drivers/i2c/busses/i2c-qup.c qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); start 1750 drivers/i2c/busses/i2c-qup.c if (!qup->btx.tag.start) { start 734 drivers/i2c/busses/i2c-rcar.c chan = rcar_i2c_request_dma_chan(dev, dir, priv->res->start + ICRXTX); start 420 drivers/i2c/busses/i2c-riic.c ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr, start 642 drivers/i2c/busses/i2c-s3c2410.c ktime_t start, now; start 650 drivers/i2c/busses/i2c-s3c2410.c start = now = ktime_get(); start 676 drivers/i2c/busses/i2c-s3c2410.c 
ktime_us_delta(now, start) < S3C2410_IDLE_TIMEOUT) { start 460 drivers/i2c/busses/i2c-sh7760.c id->ioarea = request_mem_region(res->start, REGSIZE, pdev->name); start 467 drivers/i2c/busses/i2c-sh7760.c id->iobase = ioremap(res->start, REGSIZE); start 483 drivers/i2c/busses/i2c-sh7760.c "SH7760 I2C at %08lx", (unsigned long)res->start); start 519 drivers/i2c/busses/i2c-sh7760.c pd->speed_khz, res->start, id->irq); start 529 drivers/i2c/busses/i2c-sh_mobile.c pd->res->start + ICDR); start 532 drivers/i2c/busses/i2c-sh_mobile.c pd->res->start + ICDR); start 806 drivers/i2c/busses/i2c-sh_mobile.c for (n = res->start; n <= res->end; n++) { start 82 drivers/i2c/busses/i2c-simtec.c pd->ioarea = request_mem_region(res->start, size, dev->name); start 89 drivers/i2c/busses/i2c-simtec.c pd->reg = ioremap(res->start, size); start 122 drivers/i2c/busses/i2c-simtec.c release_mem_region(pd->ioarea->start, size); start 136 drivers/i2c/busses/i2c-simtec.c release_mem_region(pd->ioarea->start, resource_size(pd->ioarea)); start 862 drivers/i2c/busses/i2c-st.c snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start); start 837 drivers/i2c/busses/i2c-stm32f4.c snprintf(adap->name, sizeof(adap->name), "STM32 I2C(%pa)", &res->start); start 1844 drivers/i2c/busses/i2c-stm32f7.c phy_addr = (dma_addr_t)res->start; start 1944 drivers/i2c/busses/i2c-stm32f7.c &res->start); start 185 drivers/i2c/busses/i2c-taos-evm.c char *start, *end; start 187 drivers/i2c/busses/i2c-taos-evm.c start = strstr(buffer, "TAOS "); start 188 drivers/i2c/busses/i2c-taos-evm.c if (!start) start 191 drivers/i2c/busses/i2c-taos-evm.c end = strchr(start, '\r'); start 196 drivers/i2c/busses/i2c-taos-evm.c return start; start 1521 drivers/i2c/busses/i2c-tegra.c base_phys = res->start; start 1531 drivers/i2c/busses/i2c-tegra.c irq = res->start; start 116 drivers/i2c/busses/i2c-viperboard.c u16 remain_len, len1, len2, start = 0x0000; start 123 drivers/i2c/busses/i2c-viperboard.c rmsg->header.addr = cpu_to_le16(start + 0x4000); start 194 drivers/i2c/busses/i2c-viperboard.c start += 1024; start 204 drivers/i2c/busses/i2c-viperboard.c memcpy(msg->buf + start, rmsg, len1); start 212 drivers/i2c/busses/i2c-viperboard.c memcpy(msg->buf + start + 512, rmsg, len2); start 222 drivers/i2c/busses/i2c-viperboard.c start = 0x0000; start 232 drivers/i2c/busses/i2c-viperboard.c wmsg->header.addr = cpu_to_le16(start + 0x4000); start 238 drivers/i2c/busses/i2c-viperboard.c start += 503; start 252 drivers/i2c/busses/i2c-viperboard.c memcpy(wmsg->data, msg->buf + start, start 508 drivers/i2c/busses/scx200_acb.c iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); start 712 drivers/i2c/i2c-core-base.c irqd = irq_get_irq_data(r->start); start 719 drivers/i2c/i2c-core-base.c return r->start; start 190 drivers/i3c/master/dw-i3c-master.c #define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2)) start 531 drivers/ide/au1xxx-ide.c if (!request_mem_region(res->start, resource_size(res), dev->name)) { start 537 drivers/ide/au1xxx-ide.c ahwif->regbase = (u32)ioremap(res->start, resource_size(res)); start 549 drivers/ide/au1xxx-ide.c ahwif->ddma_id = res->start; start 581 drivers/ide/au1xxx-ide.c release_mem_region(res->start, resource_size(res)); start 184 drivers/ide/buddha.c board = z->resource.start; start 132 drivers/ide/gayle.c if (!request_mem_region(res->start, resource_size(res), "IDE")) start 164 drivers/ide/gayle.c release_mem_region(res->start, resource_size(res)); start 174 drivers/ide/gayle.c release_mem_region(res->start, 
resource_size(res)); start 129 drivers/ide/ide-atapi.c int ide_do_start_stop(ide_drive_t *drive, struct gendisk *disk, int start) start 135 drivers/ide/ide-atapi.c pc.c[4] = start; start 207 drivers/ide/ide-cs.c io_base = link->resource[0]->start; start 209 drivers/ide/ide-cs.c ctl_base = link->resource[1]->start; start 211 drivers/ide/ide-cs.c ctl_base = link->resource[0]->start + 0x0e; start 79 drivers/ide/ide_platform.c res_base->start, resource_size(res_base)); start 81 drivers/ide/ide_platform.c res_alt->start, resource_size(res_alt)); start 84 drivers/ide/ide_platform.c res_base->start, resource_size(res_base)); start 86 drivers/ide/ide_platform.c res_alt->start, resource_size(res_alt)); start 90 drivers/ide/ide_platform.c plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); start 334 drivers/ide/palm_bk3710.c if (request_mem_region(mem->start, mem_size, "palm_bk3710") == NULL) { start 339 drivers/ide/palm_bk3710.c base = ioremap(mem->start, mem_size); start 342 drivers/ide/palm_bk3710.c release_mem_region(mem->start, mem_size); start 355 drivers/ide/palm_bk3710.c hw.irq = irq->start; start 147 drivers/ide/tx4938ide.c if (!devm_request_mem_region(&pdev->dev, res->start, start 150 drivers/ide/tx4938ide.c mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start, start 153 drivers/ide/tx4938ide.c res->start + 0x10000 + start 551 drivers/ide/tx4939ide.c if (!devm_request_mem_region(&pdev->dev, res->start, start 554 drivers/ide/tx4939ide.c mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start, start 1736 drivers/iio/adc/at91-sama5d2_adc.c st->dma_st.phys_addr = res->start; start 161 drivers/iio/adc/lpc32xx_adc.c st->adc_base = devm_ioremap(&pdev->dev, res->start, start 123 drivers/iio/adc/men_z188_adc.c adc->base = ioremap(mem->start, resource_size(mem)); start 715 drivers/iio/adc/mxs-lradc-adc.c adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); start 665 drivers/iio/adc/stm32-adc-core.c priv->common.phys_base = res->start; start 234 drivers/iio/adc/stm32-dfsdm-core.c priv->dfsdm.phys_base = res->start; start 89 drivers/iio/chemical/sgp30.c u8 start; start 209 drivers/iio/chemical/sgp30.c u8 *data_buf = &buf->start; start 250 drivers/iio/chemical/sgp30.c data_buf = &buf->start; start 46 drivers/iio/trigger/iio-trig-interrupt.c irq = irq_res->start; start 3328 drivers/infiniband/core/mad.c int start, i; start 3330 drivers/infiniband/core/mad.c start = rdma_start_port(device); start 3332 drivers/infiniband/core/mad.c for (i = start; i <= rdma_end_port(device); i++) { start 3353 drivers/infiniband/core/mad.c while (--i >= start) { start 901 drivers/infiniband/core/nldev.c int start = cb->args[0]; start 904 drivers/infiniband/core/nldev.c if (idx < start) start 992 drivers/infiniband/core/nldev.c int start = cb->args[0]; start 1020 drivers/infiniband/core/nldev.c if (idx < start) { start 1094 drivers/infiniband/core/nldev.c int start = cb->args[0]; start 1097 drivers/infiniband/core/nldev.c if (idx < start) start 1280 drivers/infiniband/core/nldev.c int start = cb->args[0]; start 1344 drivers/infiniband/core/nldev.c if (idx < start || !rdma_restrack_get(res)) start 109 drivers/infiniband/core/umem_odp.c u64 start, u64 end, void *cookie) start 112 drivers/infiniband/core/umem_odp.c item->umem.ibdev->ops.invalidate_range(item, start, end); start 138 drivers/infiniband/core/umem_odp.c rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start, start 148 drivers/infiniband/core/umem_odp.c static int invalidate_range_end_trampoline(struct 
ib_umem_odp *item, u64 start, start 164 drivers/infiniband/core/umem_odp.c rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start, start 218 drivers/infiniband/core/umem_odp.c umem_odp->interval_tree.start = start 230 drivers/infiniband/core/umem_odp.c umem_odp->interval_tree.start) >> start 770 drivers/infiniband/core/umem_odp.c u64 start, u64 last, start 779 drivers/infiniband/core/umem_odp.c if (unlikely(start == last)) start 782 drivers/infiniband/core/umem_odp.c for (node = interval_tree_iter_first(root, start, last - 1); start 787 drivers/infiniband/core/umem_odp.c next = interval_tree_iter_next(node, start, last - 1); start 789 drivers/infiniband/core/umem_odp.c ret_val = cb(umem, start, last, cookie) || ret_val; start 718 drivers/infiniband/core/uverbs_cmd.c if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) start 744 drivers/infiniband/core/uverbs_cmd.c mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, start 806 drivers/infiniband/core/uverbs_cmd.c (!cmd.start || !cmd.hca_va || 0 >= cmd.length || start 807 drivers/infiniband/core/uverbs_cmd.c (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))) start 837 drivers/infiniband/core/uverbs_cmd.c ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start, start 472 drivers/infiniband/core/uverbs_uapi.c static void uapi_remove_range(struct uverbs_api *uapi, u32 start, u32 last) start 477 drivers/infiniband/core/uverbs_uapi.c radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) { start 3501 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, start 3535 drivers/infiniband/hw/bnxt_re/ib_verbs.c umem = ib_umem_get(udata, start, length, mr_access_flags, 0); start 208 drivers/infiniband/hw/bnxt_re/ib_verbs.h struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 433 drivers/infiniband/hw/cxgb3/iwch_provider.c static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 454 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->umem = ib_umem_get(udata, start, length, acc, 0); start 802 drivers/infiniband/hw/cxgb4/device.c if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || start 805 drivers/infiniband/hw/cxgb4/device.c pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, start 824 drivers/infiniband/hw/cxgb4/device.c pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, start 826 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->pbl.start, start 827 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, start 829 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->qp.start, start 831 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->cq.start, start 877 drivers/infiniband/hw/cxgb4/device.c rdev->status_page->qp_start = rdev->lldi.vr->qp.start; start 879 drivers/infiniband/hw/cxgb4/device.c rdev->status_page->cq_start = rdev->lldi.vr->cq.start; start 1038 drivers/infiniband/hw/cxgb4/device.c devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, start 63 drivers/infiniband/hw/cxgb4/id_table.c obj += alloc->start; start 75 drivers/infiniband/hw/cxgb4/id_table.c obj -= alloc->start; start 82 drivers/infiniband/hw/cxgb4/id_table.c int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, start 87 drivers/infiniband/hw/cxgb4/id_table.c alloc->start = start; start 76 drivers/infiniband/hw/cxgb4/iw_cxgb4.h #define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start) start 77 drivers/infiniband/hw/cxgb4/iw_cxgb4.h #define 
RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start) start 89 drivers/infiniband/hw/cxgb4/iw_cxgb4.h u32 start; /* logical minimal id */ start 939 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, start 990 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, start 333 drivers/infiniband/hw/cxgb4/mem.c (rdev->lldi.vr->stag.start >> 5), start 352 drivers/infiniband/hw/cxgb4/mem.c pbl_addr, rdev->lldi.vr->pbl.start, start 508 drivers/infiniband/hw/cxgb4/mem.c struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 524 drivers/infiniband/hw/cxgb4/mem.c if ((length + start) < start) start 546 drivers/infiniband/hw/cxgb4/mem.c mhp->umem = ib_umem_get(udata, start, length, acc, 0); start 122 drivers/infiniband/hw/cxgb4/qp.c rdev->lldi.vr->ocq.start; start 124 drivers/infiniband/hw/cxgb4/qp.c rdev->lldi.vr->ocq.start); start 1809 drivers/infiniband/hw/cxgb4/qp.c rhp->rdev.lldi.vr->rq.start); start 2550 drivers/infiniband/hw/cxgb4/qp.c wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> start 2619 drivers/infiniband/hw/cxgb4/qp.c rdev->lldi.vr->rq.start); start 43 drivers/infiniband/hw/cxgb4/resource.c rdev->lldi.vr->qp.start, start 48 drivers/infiniband/hw/cxgb4/resource.c for (i = rdev->lldi.vr->qp.start; start 49 drivers/infiniband/hw/cxgb4/resource.c i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) start 308 drivers/infiniband/hw/cxgb4/resource.c pbl_start = rdev->lldi.vr->pbl.start; start 398 drivers/infiniband/hw/cxgb4/resource.c rqt_start = rdev->lldi.vr->rq.start + skip; start 483 drivers/infiniband/hw/cxgb4/resource.c unsigned start, chunk, top; start 489 drivers/infiniband/hw/cxgb4/resource.c start = rdev->lldi.vr->ocq.start; start 491 drivers/infiniband/hw/cxgb4/resource.c top = start + chunk; start 493 drivers/infiniband/hw/cxgb4/resource.c while (start < top) { start 494 drivers/infiniband/hw/cxgb4/resource.c chunk = min(top - start + 1, chunk); start 495 drivers/infiniband/hw/cxgb4/resource.c if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { start 497 drivers/infiniband/hw/cxgb4/resource.c start, chunk); start 500 drivers/infiniband/hw/cxgb4/resource.c start, top - start); start 506 drivers/infiniband/hw/cxgb4/resource.c start, chunk); start 507 drivers/infiniband/hw/cxgb4/resource.c start += chunk; start 140 drivers/infiniband/hw/efa/efa.h struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, start 1391 drivers/infiniband/hw/efa/efa_verbs.c struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, start 1426 drivers/infiniband/hw/efa/efa_verbs.c mr->umem = ib_umem_get(udata, start, length, access_flags, 0); start 1450 drivers/infiniband/hw/efa/efa_verbs.c params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)), start 1455 drivers/infiniband/hw/efa/efa_verbs.c start, length, params.page_shift, params.page_num); start 8291 drivers/infiniband/hw/hfi1/chip.c entry->is_int(dd, source - entry->start); start 14326 drivers/infiniband/hw/hfi1/chip.c int i, idx, regoff, regidx, start; start 14332 drivers/infiniband/hw/hfi1/chip.c start = 1; start 14334 drivers/infiniband/hw/hfi1/chip.c start = dd->first_dyn_alloc_ctxt; start 14336 drivers/infiniband/hw/hfi1/chip.c total_cnt = dd->num_rcv_contexts - start; start 14354 drivers/infiniband/hw/hfi1/chip.c offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); start 14356 drivers/infiniband/hw/hfi1/chip.c for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; 
start 1460 drivers/infiniband/hw/hfi1/chip.h int start; /* interrupt source type start */ start 643 drivers/infiniband/hw/hfi1/debugfs.c loff_t start, end; start 659 drivers/infiniband/hw/hfi1/debugfs.c start = *ppos & ~0x7; /* round down */ start 660 drivers/infiniband/hw/hfi1/debugfs.c if (start < DC8051_DATA_MEM_SIZE) { start 664 drivers/infiniband/hw/hfi1/debugfs.c rval = read_8051_data(ppd->dd, start, end - start, start 665 drivers/infiniband/hw/hfi1/debugfs.c (u64 *)(tmp + start)); start 54 drivers/infiniband/hw/hfi1/debugfs.h .start = _##name##_seq_start, \ start 101 drivers/infiniband/hw/hfi1/eprom.c static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, void *dest) start 112 drivers/infiniband/hw/hfi1/eprom.c end = start + len; start 123 drivers/infiniband/hw/hfi1/eprom.c start_offset = start & EP_PAGE_MASK; start 128 drivers/infiniband/hw/hfi1/eprom.c read_start = start & ~EP_PAGE_MASK; start 142 drivers/infiniband/hw/hfi1/eprom.c start += bytes; start 150 drivers/infiniband/hw/hfi1/eprom.c read_page(dd, start, buffer); start 153 drivers/infiniband/hw/hfi1/eprom.c start += EP_PAGE_SIZE; start 160 drivers/infiniband/hw/hfi1/eprom.c read_page(dd, start, buffer); start 336 drivers/infiniband/hw/hfi1/firmware.c static int write_8051(struct hfi1_devdata *dd, int code, u32 start, start 351 drivers/infiniband/hw/hfi1/firmware.c reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK) start 296 drivers/infiniband/hw/hfi1/mmu_rb.c for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1); start 299 drivers/infiniband/hw/hfi1/mmu_rb.c ptr = __mmu_int_rb_iter_next(node, range->start, start 1530 drivers/infiniband/hw/hfi1/pio.c pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE; start 84 drivers/infiniband/hw/hfi1/pio.h void __iomem *start; /* buffer start address */ start 74 drivers/infiniband/hw/hfi1/pio_copy.c void __iomem *dest = pbuf->start + SOP_DISTANCE; start 307 drivers/infiniband/hw/hfi1/pio_copy.c void __iomem *dest = pbuf->start + SOP_DISTANCE; start 399 drivers/infiniband/hw/hfi1/pio_copy.c void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); start 420 drivers/infiniband/hw/hfi1/pio_copy.c send = pbuf->start + PIO_BLOCK_SIZE; start 527 drivers/infiniband/hw/hfi1/pio_copy.c void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); start 546 drivers/infiniband/hw/hfi1/pio_copy.c send = pbuf->start + PIO_BLOCK_SIZE; start 657 drivers/infiniband/hw/hfi1/pio_copy.c dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); start 711 drivers/infiniband/hw/hfi1/pio_copy.c void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); start 68 drivers/infiniband/hw/hfi1/trace_misc.h src - is_entry->start); start 133 drivers/infiniband/hw/hfi1/trace_rx.h unsigned long start, unsigned long end), start 134 drivers/infiniband/hw/hfi1/trace_rx.h TP_ARGS(ctxt, subctxt, type, start, end), start 139 drivers/infiniband/hw/hfi1/trace_rx.h __field(unsigned long, start) start 146 drivers/infiniband/hw/hfi1/trace_rx.h __entry->start = start; start 153 drivers/infiniband/hw/hfi1/trace_rx.h __entry->start, start 69 drivers/infiniband/hw/hfi1/user_exp_rcv.c unsigned int start, u16 count, start 682 drivers/infiniband/hw/hfi1/user_exp_rcv.c unsigned int start, u16 count, start 707 drivers/infiniband/hw/hfi1/user_exp_rcv.c u16 npages, pageidx, setidx = start + idx; start 86 drivers/infiniband/hw/hfi1/user_sdma.c unsigned start, unsigned npages); start 1080 drivers/infiniband/hw/hfi1/user_sdma.c unsigned start, unsigned npages) start 1082 
drivers/infiniband/hw/hfi1/user_sdma.c hfi1_release_user_pages(mm, pages + start, npages, false); start 241 drivers/infiniband/hw/hns/hns_roce_alloc.c int buf_cnt, int start, struct hns_roce_buf *buf) start 246 drivers/infiniband/hw/hns/hns_roce_alloc.c end = start + buf_cnt; start 250 drivers/infiniband/hw/hns/hns_roce_alloc.c start, buf_cnt, buf->npages); start 255 drivers/infiniband/hw/hns/hns_roce_alloc.c for (i = start; i < end; i++) start 266 drivers/infiniband/hw/hns/hns_roce_alloc.c int buf_cnt, int start, struct ib_umem *umem, start 283 drivers/infiniband/hw/hns/hns_roce_alloc.c if (idx >= start) { start 1189 drivers/infiniband/hw/hns/hns_roce_device.h struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 1192 drivers/infiniband/hw/hns/hns_roce_device.h int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, start 1224 drivers/infiniband/hw/hns/hns_roce_device.h int buf_cnt, int start, struct hns_roce_buf *buf); start 1226 drivers/infiniband/hw/hns/hns_roce_device.h int buf_cnt, int start, struct ib_umem *umem, start 876 drivers/infiniband/hw/hns/hns_roce_hem.c unsigned long start, unsigned long end) start 891 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = start; i <= end; i += inc) { start 900 drivers/infiniband/hw/hns/hns_roce_hem.c while (i > start) { start 909 drivers/infiniband/hw/hns/hns_roce_hem.c unsigned long start, unsigned long end) start 921 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = start; i <= end; i += inc) start 1124 drivers/infiniband/hw/hns/hns_roce_hem.c int start; /* start buf offset in this hem */ start 1129 drivers/infiniband/hw/hns/hns_roce_hem.c int start, int end, start 1150 drivers/infiniband/hw/hns/hns_roce_hem.c hem->start = start; start 1196 drivers/infiniband/hw/hns/hns_roce_hem.c return (hem->start <= offset && offset <= hem->end); start 1354 drivers/infiniband/hw/hns/hns_roce_hem.c step = (cur->start - pre->start) / step * BA_BYTE_LEN; start 1446 drivers/infiniband/hw/hns/hns_roce_hem.c offset = hem->start / step * BA_BYTE_LEN; start 1561 drivers/infiniband/hw/hns/hns_roce_hem.c nr = offset - hem->start; start 120 drivers/infiniband/hw/hns/hns_roce_hem.h unsigned long start, unsigned long end); start 123 drivers/infiniband/hw/hns/hns_roce_hem.h unsigned long start, unsigned long end); start 1106 drivers/infiniband/hw/hns/hns_roce_hw_v1.c unsigned long start = jiffies; start 1151 drivers/infiniband/hw/hns/hns_roce_hw_v1.c mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); start 1130 drivers/infiniband/hw/hns/hns_roce_mr.c struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 1146 drivers/infiniband/hw/hns/hns_roce_mr.c mr->umem = ib_umem_get(udata, start, length, access_flags, 0); start 1209 drivers/infiniband/hw/hns/hns_roce_mr.c u64 start, u64 length, start 1231 drivers/infiniband/hw/hns/hns_roce_mr.c mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0); start 1285 drivers/infiniband/hw/hns/hns_roce_mr.c int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, start 1322 drivers/infiniband/hw/hns/hns_roce_mr.c start, length, start 114 drivers/infiniband/hw/hns/hns_roce_pd.c uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; start 1736 drivers/infiniband/hw/i40iw/i40iw_verbs.c u64 start, start 1766 drivers/infiniband/hw/i40iw/i40iw_verbs.c region = ib_umem_get(udata, start, length, acc, 0); start 1792 drivers/infiniband/hw/i40iw/i40iw_verbs.c region_length = region->length + (start & (iwmr->page_size - 1)); 
start 733 drivers/infiniband/hw/mlx4/mlx4_ib.h struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 895 drivers/infiniband/hw/mlx4/mlx4_ib.h u64 start, u64 length, u64 virt_addr, start 370 drivers/infiniband/hw/mlx4/mr.c static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, start 380 drivers/infiniband/hw/mlx4/mr.c unsigned long untagged_start = untagged_addr(start); start 401 drivers/infiniband/hw/mlx4/mr.c return ib_umem_get(udata, start, length, access_flags, 0); start 404 drivers/infiniband/hw/mlx4/mr.c struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 418 drivers/infiniband/hw/mlx4/mr.c mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags); start 425 drivers/infiniband/hw/mlx4/mr.c shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); start 460 drivers/infiniband/hw/mlx4/mr.c u64 start, u64 length, u64 virt_addr, start 507 drivers/infiniband/hw/mlx4/mr.c mmr->umem = mlx4_get_umem_mr(udata, start, length, start 1148 drivers/infiniband/hw/mlx5/mlx5_ib.h struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 1166 drivers/infiniband/hw/mlx5/mlx5_ib.h int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, start 1255 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, start 1287 drivers/infiniband/hw/mlx5/mlx5_ib.h unsigned long start, start 81 drivers/infiniband/hw/mlx5/mr.c static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) start 84 drivers/infiniband/hw/mlx5/mr.c length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); start 756 drivers/infiniband/hw/mlx5/mr.c u64 start, u64 length, int access_flags, start 767 drivers/infiniband/hw/mlx5/mr.c odp = ib_umem_odp_get(udata, start, length, access_flags); start 782 drivers/infiniband/hw/mlx5/mr.c u = ib_umem_get(udata, start, length, access_flags, 0); start 788 drivers/infiniband/hw/mlx5/mr.c mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, start 1250 drivers/infiniband/hw/mlx5/mr.c struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 1268 drivers/infiniband/hw/mlx5/mr.c start, virt_addr, length, access_flags); start 1270 drivers/infiniband/hw/mlx5/mr.c if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start && start 1282 drivers/infiniband/hw/mlx5/mr.c err = mr_umem_get(dev, udata, start, length, access_flags, &umem, start 1392 drivers/infiniband/hw/mlx5/mr.c int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, start 1411 drivers/infiniband/hw/mlx5/mr.c start, virt_addr, length, access_flags); start 134 drivers/infiniband/hw/mlx5/odp.c static struct ib_umem_odp *odp_lookup(u64 start, u64 length, start 142 drivers/infiniband/hw/mlx5/odp.c odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length); start 153 drivers/infiniband/hw/mlx5/odp.c if (ib_umem_start(odp) > start + length) start 250 drivers/infiniband/hw/mlx5/odp.c void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, start 270 drivers/infiniband/hw/mlx5/odp.c start = max_t(u64, ib_umem_start(umem_odp), start); start 280 drivers/infiniband/hw/mlx5/odp.c for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { start 317 drivers/infiniband/hw/mlx5/odp.c ib_umem_odp_unmap_dma_pages(umem_odp, start, end); start 4120 drivers/infiniband/hw/mlx5/qp.c copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start, start 4122 drivers/infiniband/hw/mlx5/qp.c 
memcpy(eseg->inline_hdr.start, pdata, copysz); start 4124 drivers/infiniband/hw/mlx5/qp.c sizeof(eseg->inline_hdr.start) + copysz, 16); start 66 drivers/infiniband/hw/mthca/mthca_cq.c __be64 start; start 55 drivers/infiniband/hw/mthca/mthca_eq.c __be64 start; start 322 drivers/infiniband/hw/mthca/mthca_memfree.c int start, int end) start 327 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = start; i <= end; i += inc) { start 336 drivers/infiniband/hw/mthca/mthca_memfree.c while (i > start) { start 345 drivers/infiniband/hw/mthca/mthca_memfree.c int start, int end) start 352 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) start 570 drivers/infiniband/hw/mthca/mthca_memfree.c int start, end, dir; start 581 drivers/infiniband/hw/mthca/mthca_memfree.c start = 0; start 590 drivers/infiniband/hw/mthca/mthca_memfree.c start = dev->db_tab->npages - 1; start 600 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = start; i != end; i += dir) start 608 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = start; i != end; i += dir) start 95 drivers/infiniband/hw/mthca/mthca_memfree.h int start, int end); start 97 drivers/infiniband/hw/mthca/mthca_memfree.h int start, int end); start 55 drivers/infiniband/hw/mthca/mthca_mr.c __be64 start; start 469 drivers/infiniband/hw/mthca/mthca_mr.c mpt_entry->start = cpu_to_be64(iova); start 639 drivers/infiniband/hw/mthca/mthca_mr.c memset(&mpt_entry->start, 0, start 640 drivers/infiniband/hw/mthca/mthca_mr.c sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start)); start 746 drivers/infiniband/hw/mthca/mthca_mr.c mpt_entry.start = cpu_to_be64(iova); start 749 drivers/infiniband/hw/mthca/mthca_mr.c memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, start 751 drivers/infiniband/hw/mthca/mthca_mr.c offsetof(struct mthca_mpt_entry, start)); start 796 drivers/infiniband/hw/mthca/mthca_mr.c fmr->mem.arbel.mpt->start = cpu_to_be64(iova); start 71 drivers/infiniband/hw/mthca/mthca_profile.c u64 start; start 144 drivers/infiniband/hw/mthca/mthca_profile.c profile[i].start = mem_base + total_size; start 160 drivers/infiniband/hw/mthca/mthca_profile.c (unsigned long long) profile[i].start, start 176 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->qpc_base = profile[i].start; start 181 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->eec_base = profile[i].start; start 186 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->srqc_base = profile[i].start; start 191 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->cqc_base = profile[i].start; start 195 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->eqpc_base = profile[i].start; start 198 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->eeec_base = profile[i].start; start 202 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->eqc_base = profile[i].start; start 210 drivers/infiniband/hw/mthca/mthca_profile.c dev->qp_table.rdb_base = (u32) profile[i].start; start 211 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->rdb_base = profile[i].start; start 216 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->mc_base = profile[i].start; start 223 drivers/infiniband/hw/mthca/mthca_profile.c dev->mr_table.mpt_base = profile[i].start; start 224 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->mpt_base = profile[i].start; start 229 drivers/infiniband/hw/mthca/mthca_profile.c dev->mr_table.mtt_base = profile[i].start; start 230 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->mtt_base = profile[i].start; start 
235 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->uar_scratch_base = profile[i].start; start 238 drivers/infiniband/hw/mthca/mthca_profile.c dev->av_table.ddr_av_base = profile[i].start; start 243 drivers/infiniband/hw/mthca/mthca_profile.c dev->uar_table.uarc_base = profile[i].start; start 244 drivers/infiniband/hw/mthca/mthca_profile.c init_hca->uarc_base = profile[i].start; start 854 drivers/infiniband/hw/mthca/mthca_provider.c static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 883 drivers/infiniband/hw/mthca/mthca_provider.c mr->umem = ib_umem_get(udata, start, length, acc, start 49 drivers/infiniband/hw/ocrdma/ocrdma_stats.c static int ocrdma_add_stat(char *start, char *pcur, start 58 drivers/infiniband/hw/ocrdma/ocrdma_stats.c if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) { start 861 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, start 878 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c mr->umem = ib_umem_get(udata, start, len, acc, 0); start 103 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, start 2602 drivers/infiniband/hw/qedr/verbs.c struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, start 2613 drivers/infiniband/hw/qedr/verbs.c pd->pd_id, start, len, usr_addr, acc); start 2624 drivers/infiniband/hw/qedr/verbs.c mr->umem = ib_umem_get(udata, start, len, acc, 0); start 81 drivers/infiniband/hw/qedr/verbs.h struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length, start 45 drivers/infiniband/hw/qib/qib_debugfs.c .start = _##name##_seq_start, \ start 831 drivers/infiniband/hw/qib/qib_file_ops.c unsigned long start, size; start 857 drivers/infiniband/hw/qib/qib_file_ops.c start = vma->vm_start; start 859 drivers/infiniband/hw/qib/qib_file_ops.c for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) { start 861 drivers/infiniband/hw/qib/qib_file_ops.c ret = remap_pfn_range(vma, start, pfn, size, start 2943 drivers/infiniband/hw/qib/qib_iba6120.c u32 start) start 2947 drivers/infiniband/hw/qib/qib_iba6120.c if (start && intv) { start 2949 drivers/infiniband/hw/qib/qib_iba6120.c mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start)); start 3393 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start, start 4157 drivers/infiniband/hw/qib/qib_iba7220.c u32 start) start 4160 drivers/infiniband/hw/qib/qib_iba7220.c write_7220_creg(ppd->dd, cr_psstart, start); start 4360 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start, start 6727 drivers/infiniband/hw/qib/qib_iba7322.c u32 start) start 6730 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg_port(ppd, krp_psstart, start); start 7025 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, start 7029 drivers/infiniband/hw/qib/qib_iba7322.c const int last = start + len - 1; start 7047 drivers/infiniband/hw/qib/qib_iba7322.c for (cstart = start; cstart <= last; cstart++) { start 7078 drivers/infiniband/hw/qib/qib_iba7322.c for (i = start; i <= last; i++) start 7090 drivers/infiniband/hw/qib/qib_iba7322.c for (i = start; i <= last; i++) start 7096 drivers/infiniband/hw/qib/qib_iba7322.c for (i = start; i <= last; i++) { start 7124 drivers/infiniband/hw/qib/qib_iba7322.c for (i = start; i <= last; i++) { start 7147 drivers/infiniband/hw/qib/qib_iba7322.c for (i 
= start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i) start 7151 drivers/infiniband/hw/qib/qib_iba7322.c for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) { start 381 drivers/infiniband/hw/qib/qib_tx.c void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start, start 386 drivers/infiniband/hw/qib/qib_tx.c unsigned ostart = start; start 389 drivers/infiniband/hw/qib/qib_tx.c start *= 2; start 390 drivers/infiniband/hw/qib/qib_tx.c end = start + len * 2; start 394 drivers/infiniband/hw/qib/qib_tx.c while (start < end) { start 412 drivers/infiniband/hw/qib/qib_tx.c i = start / BITS_PER_LONG; start 413 drivers/infiniband/hw/qib/qib_tx.c __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start, start 418 drivers/infiniband/hw/qib/qib_tx.c start) % BITS_PER_LONG, &dma)) start 420 drivers/infiniband/hw/qib/qib_tx.c start, dd->pioavailshadow); start 423 drivers/infiniband/hw/qib/qib_tx.c + start, dd->pioavailshadow); start 424 drivers/infiniband/hw/qib/qib_tx.c __set_bit(start, dd->pioavailkernel); start 425 drivers/infiniband/hw/qib/qib_tx.c if ((start >> 1) < dd->min_kernel_pio) start 426 drivers/infiniband/hw/qib/qib_tx.c dd->min_kernel_pio = start >> 1; start 428 drivers/infiniband/hw/qib/qib_tx.c __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT, start 430 drivers/infiniband/hw/qib/qib_tx.c __clear_bit(start, dd->pioavailkernel); start 431 drivers/infiniband/hw/qib/qib_tx.c if ((start >> 1) > dd->min_kernel_pio) start 432 drivers/infiniband/hw/qib/qib_tx.c dd->min_kernel_pio = start >> 1; start 434 drivers/infiniband/hw/qib/qib_tx.c start += 2; start 605 drivers/infiniband/hw/usnic/usnic_ib_verbs.c struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, start 612 drivers/infiniband/hw/usnic/usnic_ib_verbs.c usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start, start 619 drivers/infiniband/hw/usnic/usnic_ib_verbs.c mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, start 64 drivers/infiniband/hw/usnic/usnic_ib_verbs.h struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, start 202 drivers/infiniband/hw/usnic/usnic_uiom.c va = interval->start << PAGE_SHIFT; start 203 drivers/infiniband/hw/usnic/usnic_uiom.c size = ((interval->last - interval->start) + 1) << PAGE_SHIFT; start 266 drivers/infiniband/hw/usnic/usnic_uiom.c if ((va >> PAGE_SHIFT) < interval_node->start) start 269 drivers/infiniband/hw/usnic/usnic_uiom.c if ((va >> PAGE_SHIFT) == interval_node->start) { start 42 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c #define START(node) ((node)->start) start 45 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c #define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \ start 47 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c node = usnic_uiom_interval_node_alloc(start, \ start 57 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c #define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \ start 60 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c MAKE_NODE(node, start, end, \ start 70 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt, start 78 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c interval->start = start; start 94 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c if (node_a->start < node_b->start) start 96 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c else if (node_a->start > node_b->start) start 104 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c unsigned 
long start, unsigned long last, start 111 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c for (node = usnic_uiom_interval_tree_iter_first(root, start, last); start 113 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c node = usnic_uiom_interval_tree_iter_next(node, start, last)) start 119 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, start 126 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c long int pivot = start; start 131 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c find_intervals_intersection_sorted(root, start, last, start 135 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c if (pivot < interval->start) { start 136 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1, start 139 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c pivot = interval->start; start 178 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start, start 184 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c unsigned long lpivot = start; start 189 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c find_intervals_intersection_sorted(root, start, last, start 197 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c istart = interval->start; start 250 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c unsigned long start, unsigned long last, start 255 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c for (interval = usnic_uiom_interval_tree_iter_first(root, start, last); start 258 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c start, start 42 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h unsigned long start; start 57 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h unsigned long start, start 61 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h unsigned long start, unsigned long last); start 67 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h unsigned long start, unsigned long last, start 75 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h unsigned long start, unsigned long last, start 81 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h int usnic_uiom_get_intervals_diff(unsigned long start, start 455 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h u64 start; start 785 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c unsigned long start; start 861 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG); start 863 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->regs = ioremap(start, len); start 111 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 129 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c umem = ib_umem_get(udata, start, length, access_flags, 0); start 167 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c cmd->start = start; start 398 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c unsigned long start = vma->vm_start; start 413 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c if (io_remap_pfn_range(vma, start, context->uar.pfn, size, start 404 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 380 drivers/infiniband/sw/rdmavt/mr.c struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 393 drivers/infiniband/sw/rdmavt/mr.c umem = ib_umem_get(udata, start, length, mr_access_flags, 
0); start 405 drivers/infiniband/sw/rdmavt/mr.c mr->mr.user_base = start; start 78 drivers/infiniband/sw/rdmavt/mr.h struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, start 109 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_mem_init_user(struct rxe_pd *pd, u64 start, start 160 drivers/infiniband/sw/rxe/rxe_mr.c int rxe_mem_init_user(struct rxe_pd *pd, u64 start, start 172 drivers/infiniband/sw/rxe/rxe_mr.c umem = ib_umem_get(udata, start, length, access, 0); start 227 drivers/infiniband/sw/rxe/rxe_mr.c mem->va = start; start 926 drivers/infiniband/sw/rxe/rxe_verbs.c u64 start, start 946 drivers/infiniband/sw/rxe/rxe_verbs.c err = rxe_mem_init_user(pd, start, length, iova, start 90 drivers/infiniband/sw/siw/siw_mem.c u64 start, u64 len, int rights) start 103 drivers/infiniband/sw/siw/siw_mem.c mem->va = start; start 368 drivers/infiniband/sw/siw/siw_mem.c struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) start 383 drivers/infiniband/sw/siw/siw_mem.c first_page_va = start & PAGE_MASK; start 384 drivers/infiniband/sw/siw/siw_mem.c num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT; start 9 drivers/infiniband/sw/siw/siw_mem.h struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); start 23 drivers/infiniband/sw/siw/siw_mem.h u64 start, u64 len, int rights); start 1295 drivers/infiniband/sw/siw/siw_verbs.c struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, start 1307 drivers/infiniband/sw/siw/siw_verbs.c (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va, start 1321 drivers/infiniband/sw/siw/siw_verbs.c (PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT; start 1332 drivers/infiniband/sw/siw/siw_verbs.c umem = siw_umem_get(start, len, ib_access_writable(rights)); start 1344 drivers/infiniband/sw/siw/siw_verbs.c rv = siw_mr_add_mem(mr, pd, umem, start, len, rights); start 69 drivers/infiniband/sw/siw/siw_verbs.h struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len, start 128 drivers/infiniband/ulp/ipoib/ipoib_fs.c .start = ipoib_mcg_seq_start, start 233 drivers/infiniband/ulp/ipoib/ipoib_fs.c .start = ipoib_path_seq_start, start 568 drivers/input/input.c if (handle->open && handle->handler->start) start 569 drivers/input/input.c handle->handler->start(handle); start 1209 drivers/input/input.c .start = input_devices_seq_start, start 1273 drivers/input/input.c .start = input_handlers_seq_start, start 2413 drivers/input/input.c if (handler->start) start 2414 drivers/input/input.c handler->start(handle); start 237 drivers/input/joystick/analog.c u64 time[4], start, loop, now; start 251 drivers/input/joystick/analog.c start = now; start 270 drivers/input/joystick/analog.c } while (this && (i < 4) && (delta(start, now) < timeout)); start 278 drivers/input/joystick/analog.c port->axes[j] = (delta(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop; start 26 drivers/input/joystick/iforce/iforce-ff.c iforce->device_memory.start, iforce->device_memory.end, 2L, start 34 drivers/input/joystick/iforce/iforce-ff.c data[0] = LO(mod_chunk->start); start 35 drivers/input/joystick/iforce/iforce-ff.c data[1] = HI(mod_chunk->start); start 59 drivers/input/joystick/iforce/iforce-ff.c iforce->device_memory.start, iforce->device_memory.end, 2L, start 67 drivers/input/joystick/iforce/iforce-ff.c data[0] = LO(mod_chunk->start); start 68 drivers/input/joystick/iforce/iforce-ff.c data[1] = HI(mod_chunk->start); start 99 drivers/input/joystick/iforce/iforce-ff.c iforce->device_memory.start, iforce->device_memory.end, 2L, start 
107 drivers/input/joystick/iforce/iforce-ff.c data[0] = LO(mod_chunk->start); start 108 drivers/input/joystick/iforce/iforce-ff.c data[1] = HI(mod_chunk->start); start 136 drivers/input/joystick/iforce/iforce-ff.c iforce->device_memory.start, iforce->device_memory.end, 2L, start 144 drivers/input/joystick/iforce/iforce-ff.c data[0] = LO(mod_chunk->start); start 145 drivers/input/joystick/iforce/iforce-ff.c data[1] = HI(mod_chunk->start); start 382 drivers/input/joystick/iforce/iforce-ff.c mod1_chunk->start, start 383 drivers/input/joystick/iforce/iforce-ff.c mod2_chunk->start, start 442 drivers/input/joystick/iforce/iforce-ff.c mod1_chunk->start, start 443 drivers/input/joystick/iforce/iforce-ff.c mod2_chunk->start, start 510 drivers/input/joystick/iforce/iforce-ff.c mod1_chunk->start, mod2_chunk->start, start 249 drivers/input/joystick/iforce/iforce-main.c iforce->device_memory.start = 0; start 109 drivers/input/joystick/iforce/iforce-packets.c (iforce->core_effects[i].mod1_chunk.start == addr || start 110 drivers/input/joystick/iforce/iforce-packets.c iforce->core_effects[i].mod2_chunk.start == addr)) { start 122 drivers/input/joystick/sidewinder.c int timeout, bitout, sched, i, kick, start, strobe; start 128 drivers/input/joystick/sidewinder.c start = gameport_time(gameport, SW_START); start 130 drivers/input/joystick/sidewinder.c bitout = start; start 172 drivers/input/joystick/sidewinder.c bitout = start; /* Long bit timeout */ start 206 drivers/input/keyboard/davinci_keyscan.c davinci_ks->pbase = res->start; start 264 drivers/input/keyboard/ep93xx_keypad.c res = request_mem_region(res->start, resource_size(res), pdev->name); start 270 drivers/input/keyboard/ep93xx_keypad.c keypad->mmio_base = ioremap(res->start, resource_size(res)); start 335 drivers/input/keyboard/ep93xx_keypad.c release_mem_region(res->start, resource_size(res)); start 359 drivers/input/keyboard/ep93xx_keypad.c release_mem_region(res->start, resource_size(res)); start 121 drivers/input/keyboard/goldfish_events.c addr = devm_ioremap(&pdev->dev, res->start, 4096); start 306 drivers/input/keyboard/hilkbd.c (void *)dev->hpa.start); start 310 drivers/input/keyboard/hilkbd.c hil_base = dev->hpa.start; start 260 drivers/input/keyboard/nomadik-ske-keypad.c if (!request_mem_region(res->start, resource_size(res), pdev->name)) { start 266 drivers/input/keyboard/nomadik-ske-keypad.c keypad->reg_base = ioremap(res->start, resource_size(res)); start 360 drivers/input/keyboard/nomadik-ske-keypad.c release_mem_region(res->start, resource_size(res)); start 383 drivers/input/keyboard/nomadik-ske-keypad.c release_mem_region(res->start, resource_size(res)); start 260 drivers/input/keyboard/omap4-keypad.c res = request_mem_region(res->start, resource_size(res), pdev->name); start 267 drivers/input/keyboard/omap4-keypad.c keypad_data->base = ioremap(res->start, resource_size(res)); start 378 drivers/input/keyboard/omap4-keypad.c release_mem_region(res->start, resource_size(res)); start 398 drivers/input/keyboard/omap4-keypad.c release_mem_region(res->start, resource_size(res)); start 110 drivers/input/keyboard/pxa930_rotary.c r->mmio_base = ioremap_nocache(res->start, resource_size(res)); start 362 drivers/input/keyboard/samsung-keypad.c keypad->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 198 drivers/input/keyboard/sh_keysc.c priv->iomem_base = ioremap_nocache(res->start, resource_size(res)); start 106 drivers/input/misc/cobalt_btns.c bdev->reg = ioremap(res->start, resource_size(res)); start 199 
drivers/input/misc/msm-vibrator.c vibrator->base = devm_ioremap(&pdev->dev, res->start, start 170 drivers/input/mouse/pxa930_trkball.c trkball->mmio_base = ioremap_nocache(res->start, resource_size(res)); start 717 drivers/input/mouse/synaptics.c serio->start = synaptics_pt_start; start 85 drivers/input/rmi4/rmi_f54.c int start; start 561 drivers/input/rmi4/rmi_f54.c fifo[0] = report->start & 0xff; start 562 drivers/input/rmi4/rmi_f54.c fifo[1] = (report->start >> 8) & 0xff; start 135 drivers/input/serio/ambakmi.c kmi->base = ioremap(dev->res.start, resource_size(&dev->res)); start 45 drivers/input/serio/ct82c710.c #define CT82C710_DATA ct82c710_iores.start start 46 drivers/input/serio/ct82c710.c #define CT82C710_STATUS (ct82c710_iores.start + 1) start 153 drivers/input/serio/ct82c710.c ct82c710_iores.start = inb_p(0x391) << 2; /* Get mouse I/O address */ start 154 drivers/input/serio/ct82c710.c ct82c710_iores.end = ct82c710_iores.start + 1; start 331 drivers/input/serio/gscps2.c unsigned long hpa = dev->hpa.start; start 401 drivers/input/serio/gscps2.c release_mem_region(dev->hpa.start, GSC_STATUS + 4); start 379 drivers/input/serio/hp_sdc.c goto start; start 400 drivers/input/serio/hp_sdc.c start: start 946 drivers/input/serio/hp_sdc.c hp_sdc.base_io = d->hpa.start; start 947 drivers/input/serio/hp_sdc.c hp_sdc.data_io = d->hpa.start + 0x800; start 948 drivers/input/serio/hp_sdc.c hp_sdc.status_io = d->hpa.start + 0x801; start 340 drivers/input/serio/hyperv-keyboard.c hv_serio->start = hv_kbd_start; start 1328 drivers/input/serio/i8042.c serio->start = i8042_start; start 1357 drivers/input/serio/i8042.c serio->start = i8042_start; start 139 drivers/input/serio/ps2mult.c serio->start = ps2mult_serio_start; start 295 drivers/input/serio/sa1111ps2.c if (!request_mem_region(dev->res.start, start 296 drivers/input/serio/sa1111ps2.c dev->res.end - dev->res.start + 1, start 336 drivers/input/serio/sa1111ps2.c release_mem_region(dev->res.start, resource_size(&dev->res)); start 352 drivers/input/serio/sa1111ps2.c release_mem_region(dev->res.start, resource_size(&dev->res)); start 539 drivers/input/serio/serio.c if (serio->start) start 540 drivers/input/serio/serio.c serio->start(serio); start 234 drivers/input/serio/sun4i-ps2.c drvdata->reg_base = ioremap(res->start, resource_size(res)); start 268 drivers/input/serio/xilinx_ps2.c phys_addr = r_mem.start; start 344 drivers/input/serio/xilinx_ps2.c release_mem_region(r_mem.start, resource_size(&r_mem)); start 191 drivers/input/touchscreen/imx6ul_tsc.c u32 start; start 205 drivers/input/touchscreen/imx6ul_tsc.c start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL); start 206 drivers/input/touchscreen/imx6ul_tsc.c start |= START_SENSE; start 207 drivers/input/touchscreen/imx6ul_tsc.c start &= ~TSC_DISABLE; start 208 drivers/input/touchscreen/imx6ul_tsc.c writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); start 266 drivers/input/touchscreen/imx6ul_tsc.c u32 start; start 275 drivers/input/touchscreen/imx6ul_tsc.c start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL); start 276 drivers/input/touchscreen/imx6ul_tsc.c start |= START_SENSE; start 277 drivers/input/touchscreen/imx6ul_tsc.c writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); start 115 drivers/input/touchscreen/iqs5xx.c char start; start 792 drivers/input/touchscreen/iqs5xx.c if (rec->start != ':') { start 231 drivers/input/touchscreen/lpc32xx_ts.c if (!request_mem_region(res->start, size, pdev->name)) { start 237 drivers/input/touchscreen/lpc32xx_ts.c tsc->tsc_base = ioremap(res->start, 
size); start 295 drivers/input/touchscreen/lpc32xx_ts.c release_mem_region(res->start, size); start 316 drivers/input/touchscreen/lpc32xx_ts.c release_mem_region(res->start, resource_size(res)); start 282 drivers/input/touchscreen/rohm_bu21023.c static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf, start 292 drivers/input/touchscreen/rohm_bu21023.c msg[0].buf = &start; start 274 drivers/input/touchscreen/s3c2410_ts.c ts.io = ioremap(res->start, resource_size(res)); start 2418 drivers/iommu/amd_iommu.c dma_addr_t address, start, ret; start 2433 drivers/iommu/amd_iommu.c start = address; start 2435 drivers/iommu/amd_iommu.c ret = iommu_map_page(&dma_dom->domain, start, paddr, start 2441 drivers/iommu/amd_iommu.c start += PAGE_SIZE; start 2453 drivers/iommu/amd_iommu.c start -= PAGE_SIZE; start 2454 drivers/iommu/amd_iommu.c iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); start 2476 drivers/iommu/amd_iommu.c dma_addr_t i, start; start 2481 drivers/iommu/amd_iommu.c start = dma_addr; start 2484 drivers/iommu/amd_iommu.c iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); start 2485 drivers/iommu/amd_iommu.c start += PAGE_SIZE; start 2832 drivers/iommu/amd_iommu.c IOVA_PFN(r->start), start 3242 drivers/iommu/amd_iommu.c unsigned long start, end; start 3244 drivers/iommu/amd_iommu.c start = IOVA_PFN(region->start); start 3245 drivers/iommu/amd_iommu.c end = IOVA_PFN(region->start + region->length - 1); start 3247 drivers/iommu/amd_iommu.c WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); start 346 drivers/iommu/amd_iommu_init.c u64 start = iommu->exclusion_start & PAGE_MASK; start 347 drivers/iommu/amd_iommu_init.c u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; start 353 drivers/iommu/amd_iommu_init.c entry = start | MMIO_EXCL_ENABLE_MASK; start 363 drivers/iommu/amd_iommu_v2.c unsigned long start, unsigned long end) start 371 drivers/iommu/amd_iommu_v2.c if ((start ^ (end - 1)) < PAGE_SIZE) start 373 drivers/iommu/amd_iommu_v2.c start); start 1993 drivers/iommu/arm-smmu-v3.c unsigned long start = iova, end = iova + size; start 2030 drivers/iommu/arm-smmu-v3.c arm_smmu_atc_inv_domain(smmu_domain, 0, start, size); start 2488 drivers/iommu/arm-smmu-v3.c arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start, start 3608 drivers/iommu/arm-smmu-v3.c ioaddr = res->start; start 222 drivers/iommu/arm-smmu.c static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) start 227 drivers/iommu/arm-smmu.c idx = find_next_zero_bit(map, end, start); start 631 drivers/iommu/arm-smmu.c int irq, start, ret = 0; start 701 drivers/iommu/arm-smmu.c start = smmu->num_s2_context_banks; start 724 drivers/iommu/arm-smmu.c start = 0; start 743 drivers/iommu/arm-smmu.c ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, start 2032 drivers/iommu/arm-smmu.c ioaddr = res->start; start 170 drivers/iommu/dma-iommu.c phys_addr_t start, phys_addr_t end) start 176 drivers/iommu/dma-iommu.c start -= iova_offset(iovad, start); start 177 drivers/iommu/dma-iommu.c num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); start 184 drivers/iommu/dma-iommu.c msi_page->phys = start; start 185 drivers/iommu/dma-iommu.c msi_page->iova = start; start 188 drivers/iommu/dma-iommu.c start += iovad->granule; start 200 drivers/iommu/dma-iommu.c phys_addr_t start = 0, end; start 206 drivers/iommu/dma-iommu.c lo = iova_pfn(iovad, window->res->start - window->offset); start 213 drivers/iommu/dma-iommu.c end = window->res->start - window->offset; 
start 215 drivers/iommu/dma-iommu.c if (end > start) { start 216 drivers/iommu/dma-iommu.c lo = iova_pfn(iovad, start); start 225 drivers/iommu/dma-iommu.c start = window->res->end - window->offset + 1; start 260 drivers/iommu/dma-iommu.c lo = iova_pfn(iovad, region->start); start 261 drivers/iommu/dma-iommu.c hi = iova_pfn(iovad, region->start + region->length - 1); start 265 drivers/iommu/dma-iommu.c ret = cookie_init_hw_msi_region(cookie, region->start, start 266 drivers/iommu/dma-iommu.c region->start + region->length); start 888 drivers/iommu/dma-iommu.c dma_addr_t start, end; start 899 drivers/iommu/dma-iommu.c start = sg_dma_address(sg); start 906 drivers/iommu/dma-iommu.c __iommu_dma_unmap(dev, start, end - start); start 81 drivers/iommu/dmar.c void *dmar_alloc_dev_scope(void *start, void *end, int *cnt) start 86 drivers/iommu/dmar.c while (start < end) { start 87 drivers/iommu/dmar.c scope = start; start 96 drivers/iommu/dmar.c start += scope->length; start 219 drivers/iommu/dmar.c void *start, void*end, u16 segment, start 231 drivers/iommu/dmar.c for (; start < end; start += scope->length) { start 232 drivers/iommu/dmar.c scope = start; start 556 drivers/iommu/dmar.c static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, start 560 drivers/iommu/dmar.c struct acpi_dmar_header *end = ((void *)start) + len; start 562 drivers/iommu/dmar.c for (iter = start; iter < end; iter = next) { start 1934 drivers/iommu/dmar.c struct acpi_dmar_header *start; start 1953 drivers/iommu/dmar.c start = (struct acpi_dmar_header *)obj->buffer.pointer; start 1954 drivers/iommu/dmar.c ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback); start 1817 drivers/iommu/intel-iommu.c IOVA_PFN(r->start), start 2669 drivers/iommu/intel-iommu.c unsigned long long start, start 2672 drivers/iommu/intel-iommu.c unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; start 2681 drivers/iommu/intel-iommu.c pr_debug("Mapping reserved region %llx-%llx\n", start, end); start 2695 drivers/iommu/intel-iommu.c unsigned long long start, start 2704 drivers/iommu/intel-iommu.c start, end); start 2708 drivers/iommu/intel-iommu.c dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end); start 2710 drivers/iommu/intel-iommu.c if (end < start) { start 2729 drivers/iommu/intel-iommu.c return iommu_domain_identity_map(domain, start, end); start 2771 drivers/iommu/intel-iommu.c unsigned long long start = rmrr->base_address; start 2774 drivers/iommu/intel-iommu.c if (WARN_ON(end < start || start 2778 drivers/iommu/intel-iommu.c ret = iommu_domain_identity_map(si_domain, start, end); start 4653 drivers/iommu/intel-iommu.c unsigned long long start, end; start 4658 drivers/iommu/intel-iommu.c start = mhp->start_pfn << PAGE_SHIFT; start 4660 drivers/iommu/intel-iommu.c if (iommu_domain_identity_map(si_domain, start, end)) { start 4662 drivers/iommu/intel-iommu.c start, end); start 5788 drivers/iommu/intel-iommu.c unsigned long start, end; start 5790 drivers/iommu/intel-iommu.c start = IOVA_PFN(region->start); start 5791 drivers/iommu/intel-iommu.c end = IOVA_PFN(region->start + region->length - 1); start 5793 drivers/iommu/intel-iommu.c WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); start 31 drivers/iommu/intel-pasid.c int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp) start 35 drivers/iommu/intel-pasid.c min = max_t(int, start, PASID_MIN); start 79 drivers/iommu/intel-pasid.h int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp); start 168 
drivers/iommu/intel-svm.c unsigned long start, unsigned long end) start 172 drivers/iommu/intel-svm.c intel_flush_svm_range(svm, start, start 173 drivers/iommu/intel-svm.c (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); start 239 drivers/iommu/intel_irq_remapping.c struct irte *start, *entry, *end; start 249 drivers/iommu/intel_irq_remapping.c start = iommu->ir_table->base + index; start 250 drivers/iommu/intel_irq_remapping.c end = start + (1 << irq_iommu->irte_mask); start 252 drivers/iommu/intel_irq_remapping.c for (entry = start; entry < end; entry++) { start 930 drivers/iommu/intel_irq_remapping.c void *start, *end; start 933 drivers/iommu/intel_irq_remapping.c start = (void *)(drhd + 1); start 936 drivers/iommu/intel_irq_remapping.c while (start < end && ret == 0) { start 937 drivers/iommu/intel_irq_remapping.c scope = start; start 942 drivers/iommu/intel_irq_remapping.c start += scope->length; start 508 drivers/iommu/io-pgtable-arm.c arm_lpae_iopte *start, *end; start 516 drivers/iommu/io-pgtable-arm.c start = ptep; start 533 drivers/iommu/io-pgtable-arm.c __arm_lpae_free_pages(start, table_size, &data->iop.cfg); start 298 drivers/iommu/iommu.c nr = iommu_alloc_resv_region(new->start, new->length, start 305 drivers/iommu/iommu.c if (nr->start < iter->start || start 306 drivers/iommu/iommu.c (nr->start == iter->start && nr->type <= iter->type)) start 313 drivers/iommu/iommu.c phys_addr_t top_end, iter_end = iter->start + iter->length - 1; start 330 drivers/iommu/iommu.c top_end = top->start + top->length - 1; start 332 drivers/iommu/iommu.c if (iter->start > top_end + 1) { start 335 drivers/iommu/iommu.c top->length = max(top_end, iter_end) - top->start + 1; start 393 drivers/iommu/iommu.c (long long int)region->start, start 394 drivers/iommu/iommu.c (long long int)(region->start + start 644 drivers/iommu/iommu.c dma_addr_t start, end, addr; start 649 drivers/iommu/iommu.c start = ALIGN(entry->start, pg_size); start 650 drivers/iommu/iommu.c end = ALIGN(entry->start + entry->length, pg_size); start 656 drivers/iommu/iommu.c for (addr = start; addr < end; addr += pg_size) { start 1999 drivers/iommu/iommu.c phys_addr_t start; start 2006 drivers/iommu/iommu.c if (len && s_phys != start + len) { start 2007 drivers/iommu/iommu.c ret = iommu_map(domain, iova + mapped, start, len, prot); start 2019 drivers/iommu/iommu.c start = s_phys; start 2172 drivers/iommu/iommu.c struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, start 2183 drivers/iommu/iommu.c region->start = start; start 155 drivers/iommu/iova.c struct rb_node *start) start 159 drivers/iommu/iova.c new = (start) ? 
&start : &(root->rb_node); start 196 drivers/iommu/msm_iommu.c static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) start 201 drivers/iommu/msm_iommu.c idx = find_next_zero_bit(map, end, start); start 765 drivers/iommu/msm_iommu.c ioaddr = r->start; start 692 drivers/iommu/mtk_iommu.c ioaddr = res->start; start 427 drivers/iommu/omap-iommu.c u32 start; start 433 drivers/iommu/omap-iommu.c start = iotlb_cr_to_virt(&cr); start 436 drivers/iommu/omap-iommu.c if ((start <= da) && (da < start + bytes)) { start 438 drivers/iommu/omap-iommu.c __func__, start, da, bytes); start 354 drivers/iommu/tegra-gart.c gart->iovmm_base = res->start; start 325 drivers/iommu/virtio-iommu.c mapping->iova.start = iova; start 363 drivers/iommu/virtio-iommu.c if (mapping->iova.start < iova) start 370 drivers/iommu/virtio-iommu.c unmapped += mapping->iova.last - mapping->iova.start + 1; start 402 drivers/iommu/virtio-iommu.c .virt_start = cpu_to_le64(mapping->iova.start), start 425 drivers/iommu/virtio-iommu.c phys_addr_t start, end; start 429 drivers/iommu/virtio-iommu.c start = start64 = le64_to_cpu(mem->start); start 434 drivers/iommu/virtio-iommu.c if (start != start64 || end != end64 || size < end64 - start64) start 446 drivers/iommu/virtio-iommu.c region = iommu_alloc_resv_region(start, size, 0, start 450 drivers/iommu/virtio-iommu.c region = iommu_alloc_resv_region(start, size, prot, start 795 drivers/iommu/virtio-iommu.c paddr = mapping->paddr + (iova - mapping->iova.start); start 1047 drivers/iommu/virtio-iommu.c struct virtio_iommu_config, input_range.start, start 1055 drivers/iommu/virtio-iommu.c struct virtio_iommu_config, domain_range.start, start 499 drivers/ipack/carriers/tpci200.c dev->region[space].start = start 280 drivers/ipack/devices/ipoctal.c region->start, region->size); start 296 drivers/ipack/devices/ipoctal.c region->start, region->size); start 307 drivers/ipack/devices/ipoctal.c region->start, 0x8000); start 354 drivers/ipack/ipack.c idmem = ioremap(dev->region[IPACK_ID_SPACE].start, start 256 drivers/irqchip/irq-alpine-msi.c priv->addr = res.start & GENMASK_ULL(63,20); start 656 drivers/irqchip/irq-armada-370-xp.c BUG_ON(!request_mem_region(main_int_res.start, start 659 drivers/irqchip/irq-armada-370-xp.c BUG_ON(!request_mem_region(per_cpu_int_res.start, start 663 drivers/irqchip/irq-armada-370-xp.c main_int_base = ioremap(main_int_res.start, start 667 drivers/irqchip/irq-armada-370-xp.c per_cpu_int_base = ioremap(per_cpu_int_res.start, start 687 drivers/irqchip/irq-armada-370-xp.c armada_370_xp_msi_init(node, main_int_res.start); start 260 drivers/irqchip/irq-bcm6345-l1.c cpu->map_base = ioremap(res.start, sz); start 270 drivers/irqchip/irq-bcm7038-l1.c cpu->map_base = ioremap(res.start, sz); start 231 drivers/irqchip/irq-clps711x.c return _clps711x_intc_init(np, res.start, resource_size(&res)); start 87 drivers/irqchip/irq-davinci-aintc.c req = request_mem_region(config->reg.start, start 95 drivers/irqchip/irq-davinci-aintc.c davinci_aintc_base = ioremap(config->reg.start, start 166 drivers/irqchip/irq-davinci-cp-intc.c req = request_mem_region(config->reg.start, start 174 drivers/irqchip/irq-davinci-cp-intc.c davinci_cp_intc_base = ioremap(config->reg.start, start 92 drivers/irqchip/irq-dw-apb-ictl.c if (!request_mem_region(r.start, resource_size(&r), np->full_name)) { start 97 drivers/irqchip/irq-dw-apb-ictl.c iobase = ioremap(r.start, resource_size(&r)); start 156 drivers/irqchip/irq-dw-apb-ictl.c release_mem_region(r.start, resource_size(&r)); start 102 
drivers/irqchip/irq-gic-v2m.c return v2m->res.start | ((hwirq - 32) << 3); start 104 drivers/irqchip/irq-gic-v2m.c return v2m->res.start + V2M_MSI_SETSPI_NS; start 336 drivers/irqchip/irq-gic-v2m.c v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); start 509 drivers/irqchip/irq-gic-v2m.c res.start = m->base_address; start 515 drivers/irqchip/irq-gic-v2m.c res.end = res.start + SZ_8K - 1; start 528 drivers/irqchip/irq-gic-v2m.c fwnode = irq_domain_alloc_fwnode(&res.start); start 1658 drivers/irqchip/irq-gic-v3-its.c phys_addr_t start, end, addr_end; start 1671 drivers/irqchip/irq-gic-v3-its.c for_each_reserved_mem_region(i, &start, &end) { start 1672 drivers/irqchip/irq-gic-v3-its.c if (addr >= start && addr_end <= end) start 3546 drivers/irqchip/irq-gic-v3-its.c &res->start); start 3562 drivers/irqchip/irq-gic-v3-its.c &res->start, its_number); start 3579 drivers/irqchip/irq-gic-v3-its.c its_base = ioremap(res->start, resource_size(res)); start 3581 drivers/irqchip/irq-gic-v3-its.c pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); start 3587 drivers/irqchip/irq-gic-v3-its.c pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); start 3594 drivers/irqchip/irq-gic-v3-its.c pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); start 3612 drivers/irqchip/irq-gic-v3-its.c its->phys_base = res->start; start 3625 drivers/irqchip/irq-gic-v3-its.c &res->start, err); start 3627 drivers/irqchip/irq-gic-v3-its.c pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); start 3708 drivers/irqchip/irq-gic-v3-its.c pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); start 3940 drivers/irqchip/irq-gic-v3-its.c res.start = its_entry->base_address; start 3944 drivers/irqchip/irq-gic-v3-its.c dom_handle = irq_domain_alloc_fwnode(&res.start); start 3947 drivers/irqchip/irq-gic-v3-its.c &res.start); start 3951 drivers/irqchip/irq-gic-v3-its.c err = iort_register_domain_token(its_entry->translation_id, res.start, start 3955 drivers/irqchip/irq-gic-v3-its.c &res.start, its_entry->translation_id); start 318 drivers/irqchip/irq-gic-v3-mbi.c mbi_phys_base = res.start; start 1766 drivers/irqchip/irq-gic-v3.c rdist_regs[i].phys_base = res.start; start 2032 drivers/irqchip/irq-gic-v3.c vcpu->start = acpi_data.vcpu_base; start 2033 drivers/irqchip/irq-gic-v3.c vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; start 967 drivers/irqchip/irq-gic.c gic_dist_physaddr = res.start; start 1297 drivers/irqchip/irq-gic.c alt = ioremap(cpuif_res.start, SZ_8K); start 1307 drivers/irqchip/irq-gic.c &cpuif_res.start); start 1320 drivers/irqchip/irq-gic.c alt = ioremap(cpuif_res.start, SZ_128K); start 1324 drivers/irqchip/irq-gic.c &cpuif_res.start); start 1325 drivers/irqchip/irq-gic.c cpuif_res.end = cpuif_res.start + SZ_128K -1; start 1345 drivers/irqchip/irq-gic.c cpuif_res.start += 0xf000; start 1347 drivers/irqchip/irq-gic.c &cpuif_res.start); start 1575 drivers/irqchip/irq-gic.c vctrl_res->start = acpi_data.vctrl_base; start 1576 drivers/irqchip/irq-gic.c vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1; start 1582 drivers/irqchip/irq-gic.c vcpu_res->start = acpi_data.vcpu_base; start 1583 drivers/irqchip/irq-gic.c vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; start 282 drivers/irqchip/irq-i8259.c .start = PIC_MASTER_CMD, start 289 drivers/irqchip/irq-i8259.c .start = PIC_SLAVE_CMD, start 327 drivers/irqchip/irq-imgpdc.c priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start, start 360 drivers/irqchip/irq-ls-scfg-msi.c 
msi_data->msiir_addr = res->start; start 343 drivers/irqchip/irq-mbigen.c mgn_chip->base = devm_ioremap(&pdev->dev, res->start, start 709 drivers/irqchip/irq-mips-gic.c gic_base = res.start; start 26 drivers/irqchip/irq-mvebu-gicp.c unsigned int start; start 48 drivers/irqchip/irq-mvebu-gicp.c return r->start + idx; start 59 drivers/irqchip/irq-mvebu-gicp.c phys_addr_t setspi = gicp->res->start + GICP_SETSPI_NSR_OFFSET; start 60 drivers/irqchip/irq-mvebu-gicp.c phys_addr_t clrspi = gicp->res->start + GICP_CLRSPI_NSR_OFFSET; start 204 drivers/irqchip/irq-mvebu-gicp.c &gicp->spi_ranges[i].start); start 62 drivers/irqchip/irq-mvebu-odmi.c addr = odmi->res.start + GICP_ODMIN_SET; start 142 drivers/irqchip/irq-mvebu-sei.c phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET; start 314 drivers/irqchip/irq-omap-intc.c base = res.start; start 173 drivers/irqchip/irq-ompic.c ompic_base = ioremap(res.start, resource_size(&res)); start 82 drivers/irqchip/irq-orion.c if (!request_mem_region(r.start, resource_size(&r), np->name)) start 86 drivers/irqchip/irq-orion.c gc->reg_base = ioremap(r.start, resource_size(&r)); start 170 drivers/irqchip/irq-orion.c if (!request_mem_region(r.start, resource_size(&r), np->name)) { start 183 drivers/irqchip/irq-orion.c gc->reg_base = ioremap(r.start, resource_size(&r)); start 428 drivers/irqchip/irq-renesas-intc-irqpin.c p->irq[k].requested_irq = irq->start; start 463 drivers/irqchip/irq-renesas-intc-irqpin.c i->iomem = devm_ioremap_nocache(dev, io[k]->start, start 151 drivers/irqchip/irq-renesas-irqc.c p->irq[k].requested_irq = irq->start; start 193 drivers/irqchip/irq-sni-exiu.c data->base = ioremap(res->start, resource_size(res)); start 190 drivers/irqchip/irq-tango.c chip->ctl = res.start - baseres->start; start 111 drivers/irqchip/irq-tb10x.c if (!request_mem_region(mem.start, resource_size(&mem), start 117 drivers/irqchip/irq-tb10x.c reg_base = ioremap(mem.start, resource_size(&mem)); start 181 drivers/irqchip/irq-tb10x.c release_mem_region(mem.start, resource_size(&mem)); start 1245 drivers/isdn/capi/capi.c .start = capinc_tty_start, start 98 drivers/isdn/capi/kcapi_proc.c .start = controller_start, start 105 drivers/isdn/capi/kcapi_proc.c .start = controller_start, start 180 drivers/isdn/capi/kcapi_proc.c .start = applications_start, start 187 drivers/isdn/capi/kcapi_proc.c .start = applications_start, start 222 drivers/isdn/capi/kcapi_proc.c .start = capi_driver_start, start 4391 drivers/isdn/hardware/mISDN/hfcmulti.c hc->plx_origmembase = hc->pci_dev->resource[0].start; start 4413 drivers/isdn/hardware/mISDN/hfcmulti.c hc->pci_origmembase = hc->pci_dev->resource[2].start; start 4444 drivers/isdn/hardware/mISDN/hfcmulti.c hc->pci_origmembase = hc->pci_dev->resource[1].start; start 4472 drivers/isdn/hardware/mISDN/hfcmulti.c hc->pci_iobase = (u_int) hc->pci_dev->resource[0].start; start 2000 drivers/isdn/hardware/mISDN/hfcpci.c (char __iomem *)(unsigned long)hc->pdev->resource[1].start; start 88 drivers/isdn/hardware/mISDN/mISDNinfineon.c resource_size_t start; start 271 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inb((u32)hw->cfg.start + DIVA_PCI_CTRL); start 308 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inb((u32)hw->cfg.start + TIGER_AUX_STATUS); start 326 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inb((u32)hw->cfg.start + ELSA_IRQ_ADDR); start 344 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); start 349 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + 
NICCY_IRQ_CTRL_REG); start 399 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(TIGER_IRQ_BIT, (u32)hw->cfg.start + TIGER_AUX_IRQMASK); start 402 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(QS1000_IRQ_ON, (u32)hw->cfg.start + ELSA_IRQ_ADDR); start 405 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(QS3000_IRQ_ON, (u32)hw->cfg.start + ELSA_IRQ_ADDR); start 408 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); start 410 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); start 413 drivers/isdn/hardware/mISDN/mISDNinfineon.c w = inw((u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); start 415 drivers/isdn/hardware/mISDN/mISDNinfineon.c outw(w, (u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); start 419 drivers/isdn/hardware/mISDN/mISDNinfineon.c (u32)hw->cfg.start + GAZEL_INCSR); start 423 drivers/isdn/hardware/mISDN/mISDNinfineon.c (u32)hw->cfg.start + GAZEL_INCSR); start 443 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(0, (u32)hw->cfg.start + TIGER_AUX_IRQMASK); start 446 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(QS1000_IRQ_OFF, (u32)hw->cfg.start + ELSA_IRQ_ADDR); start 449 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(QS3000_IRQ_OFF, (u32)hw->cfg.start + ELSA_IRQ_ADDR); start 452 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); start 454 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); start 457 drivers/isdn/hardware/mISDN/mISDNinfineon.c w = inw((u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); start 459 drivers/isdn/hardware/mISDN/mISDNinfineon.c outw(w, (u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); start 463 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(0, (u32)hw->cfg.start + GAZEL_INCSR); start 492 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(0, (u32)hw->cfg.start + DIVA_PCI_CTRL); start 494 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(DIVA_RESET_BIT, (u32)hw->cfg.start + DIVA_PCI_CTRL); start 497 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(9, (u32)hw->cfg.start + 0x69); start 499 drivers/isdn/hardware/mISDN/mISDNinfineon.c (u32)hw->cfg.start + DIVA_PCI_CTRL); start 533 drivers/isdn/hardware/mISDN/mISDNinfineon.c w = inw((u32)hw->cfg.start + SCT_PLX_RESET_ADDR); start 535 drivers/isdn/hardware/mISDN/mISDNinfineon.c outw(w, (u32)hw->cfg.start + SCT_PLX_RESET_ADDR); start 537 drivers/isdn/hardware/mISDN/mISDNinfineon.c w = inw((u32)hw->cfg.start + SCT_PLX_RESET_ADDR); start 539 drivers/isdn/hardware/mISDN/mISDNinfineon.c outw(w, (u32)hw->cfg.start + SCT_PLX_RESET_ADDR); start 543 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inl((u32)hw->cfg.start + GAZEL_CNTRL); start 545 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); start 548 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); start 555 drivers/isdn/hardware/mISDN/mISDNinfineon.c val = inl((u32)hw->cfg.start + GAZEL_CNTRL); start 557 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); start 560 drivers/isdn/hardware/mISDN/mISDNinfineon.c outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); start 634 drivers/isdn/hardware/mISDN/mISDNinfineon.c release_mem_region(hw->cfg.start, hw->cfg.size); start 637 drivers/isdn/hardware/mISDN/mISDNinfineon.c release_region(hw->cfg.start, hw->cfg.size); start 642 drivers/isdn/hardware/mISDN/mISDNinfineon.c release_mem_region(hw->addr.start, hw->addr.size); start 645 drivers/isdn/hardware/mISDN/mISDNinfineon.c 
release_region(hw->addr.start, hw->addr.size); start 656 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->cfg.start = pci_resource_start(hw->pdev, hw->ci->cfg_bar); start 659 drivers/isdn/hardware/mISDN/mISDNinfineon.c if (!request_mem_region(hw->cfg.start, hw->cfg.size, start 663 drivers/isdn/hardware/mISDN/mISDNinfineon.c if (!request_region(hw->cfg.start, hw->cfg.size, start 670 drivers/isdn/hardware/mISDN/mISDNinfineon.c (ulong)hw->cfg.start, (ulong)hw->cfg.size); start 674 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size); start 678 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->name, (ulong)hw->cfg.start, start 683 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->addr.start = pci_resource_start(hw->pdev, hw->ci->addr_bar); start 686 drivers/isdn/hardware/mISDN/mISDNinfineon.c if (!request_mem_region(hw->addr.start, hw->addr.size, start 690 drivers/isdn/hardware/mISDN/mISDNinfineon.c if (!request_region(hw->addr.start, hw->addr.size, start 697 drivers/isdn/hardware/mISDN/mISDNinfineon.c (ulong)hw->addr.start, (ulong)hw->addr.size); start 701 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->addr.p = ioremap(hw->addr.start, hw->addr.size); start 708 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->name, (ulong)hw->addr.start, start 718 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->cfg.start + DIVA_ISAC_ALE; start 719 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.port = (u32)hw->cfg.start + DIVA_ISAC_PORT; start 721 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.ale = (u32)hw->cfg.start + DIVA_HSCX_ALE; start 722 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.port = (u32)hw->cfg.start + DIVA_HSCX_PORT; start 744 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE; start 745 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT; start 747 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE; start 748 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT; start 749 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(0xff, (ulong)hw->cfg.start); start 751 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(0x00, (ulong)hw->cfg.start); start 753 drivers/isdn/hardware/mISDN/mISDNinfineon.c outb(TIGER_IOMASK, (ulong)hw->cfg.start + TIGER_AUX_CTRL); start 759 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start; start 760 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.port = (u32)hw->addr.start + 1; start 762 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.ale = (u32)hw->addr.start; start 763 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.port = (u32)hw->addr.start + 1; start 769 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start + NICCY_ISAC_ALE; start 770 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.port = (u32)hw->addr.start + NICCY_ISAC_PORT; start 772 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.ale = (u32)hw->addr.start + NICCY_HSCX_ALE; start 773 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->hscx.a.io.port = (u32)hw->addr.start + NICCY_HSCX_PORT; start 778 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start; start 788 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start + 0x08; start 798 
drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start + 0x10; start 808 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start + 0x20; start 819 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.port = (u32)hw->addr.start; start 827 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.ale = (u32)hw->addr.start; start 828 drivers/isdn/hardware/mISDN/mISDNinfineon.c hw->isac.a.io.port = (u32)hw->addr.start + GAZEL_IPAC_DATA_PORT; start 31 drivers/isdn/hardware/mISDN/netjet.c u32 *start; start 188 drivers/isdn/hardware/mISDN/netjet.c val = card->send.start[idx]; start 191 drivers/isdn/hardware/mISDN/netjet.c card->send.start[idx++] = val; start 324 drivers/isdn/hardware/mISDN/netjet.c card->send.start = card->dma_p; start 336 drivers/isdn/hardware/mISDN/netjet.c card->send.dmaend, card->send.start, card->send.size); start 342 drivers/isdn/hardware/mISDN/netjet.c card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2); start 354 drivers/isdn/hardware/mISDN/netjet.c card->recv.dmaend, card->recv.start, card->recv.size); start 393 drivers/isdn/hardware/mISDN/netjet.c val = card->recv.start[idx++]; start 511 drivers/isdn/hardware/mISDN/netjet.c v = card->send.start[bc->idx]; start 514 drivers/isdn/hardware/mISDN/netjet.c card->send.start[bc->idx++] = v; start 574 drivers/isdn/hardware/mISDN/netjet.c v = card->send.start[bc->idx]; start 577 drivers/isdn/hardware/mISDN/netjet.c card->send.start[bc->idx++] = v; start 583 drivers/isdn/hardware/mISDN/netjet.c v = card->send.start[bc->idx]; start 587 drivers/isdn/hardware/mISDN/netjet.c card->send.start[bc->idx++] = v; start 377 drivers/isdn/mISDN/dsp_tones.c int index, count, start, num; start 412 drivers/isdn/mISDN/dsp_tones.c start = count % (*(pat->siz[index])); start 416 drivers/isdn/mISDN/dsp_tones.c if (num + start > (*(pat->siz[index]))) start 417 drivers/isdn/mISDN/dsp_tones.c num = (*(pat->siz[index])) - start; start 419 drivers/isdn/mISDN/dsp_tones.c memcpy(data, pat->data[index] + start, num); start 160 drivers/leds/leds-88pm860x.c data->reg_control = res->start; start 166 drivers/leds/leds-88pm860x.c data->reg_blink = res->start; start 45 drivers/leds/leds-cobalt-qube.c led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 75 drivers/leds/leds-cobalt-raq.c led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 161 drivers/leds/leds-lp5521.c static void lp5521_run_engine(struct lp55xx_chip *chip, bool start) start 168 drivers/leds/leds-lp5521.c if (!start) { start 217 drivers/leds/leds-lp5523.c static void lp5523_run_engine(struct lp55xx_chip *chip, bool start) start 224 drivers/leds/leds-lp5523.c if (!start) { start 153 drivers/leds/leds-lp5562.c static void lp5562_run_engine(struct lp55xx_chip *chip, bool start) start 160 drivers/leds/leds-lp5562.c if (!start) { start 274 drivers/leds/leds-lp55xx-common.c static inline void lp55xx_run_engine(struct lp55xx_chip *chip, bool start) start 277 drivers/leds/leds-lp55xx-common.c chip->cfg->run_engine(chip, start); start 119 drivers/leds/leds-lp55xx-common.h void (*run_engine) (struct lp55xx_chip *chip, bool start); start 156 drivers/leds/leds-lp8501.c static void lp8501_run_engine(struct lp55xx_chip *chip, bool start) start 163 drivers/leds/leds-lp8501.c if (!start) { start 146 drivers/leds/leds-nic78bx.c if (!devm_request_region(dev, io_rc->start, resource_size(io_rc), start 152 drivers/leds/leds-nic78bx.c led_data->io_base = io_rc->start; start 145 drivers/leds/leds-sunfire.c 
p->leds[i].reg = (void __iomem *) pdev->resource[0].start; start 230 drivers/leds/leds-wm831x-status.c drvdata->reg = res->start; start 108 drivers/macintosh/macio-adb.c adb = ioremap(r.start, sizeof(struct adb_regs)); start 190 drivers/macintosh/macio_asic.c res->end = res->start + 0x1ffff; start 198 drivers/macintosh/macio_asic.c if ((res->start & 0x0001f000) == 0x00008000) start 199 drivers/macintosh/macio_asic.c res->end = res->start + 0xff; start 221 drivers/macintosh/macio_asic.c if (index == 0 && (res->end - res->start) > 0xfff) start 222 drivers/macintosh/macio_asic.c res->end = res->start + 0xfff; start 223 drivers/macintosh/macio_asic.c if (index == 1 && (res->end - res->start) > 0xff) start 224 drivers/macintosh/macio_asic.c res->end = res->start + 0xff; start 236 drivers/macintosh/macio_asic.c dev->interrupt[index].start = irq; start 300 drivers/macintosh/macio_asic.c res->start = irq; start 440 drivers/macintosh/rack-meter.c pr_debug(" i2s @0x%08x\n", (unsigned int)ri2s.start); start 441 drivers/macintosh/rack-meter.c pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start); start 464 drivers/macintosh/rack-meter.c rm->i2s_regs = ioremap(ri2s.start, 0x1000); start 475 drivers/macintosh/rack-meter.c rm->dma_regs = ioremap(rdma.start, 0x100); start 1575 drivers/mailbox/bcm-pdc-mailbox.c &pdc_regs->start, &pdc_regs->end); start 366 drivers/mailbox/mailbox-test.c tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size); start 376 drivers/mailbox/mailbox-test.c tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size); start 134 drivers/mailbox/pl320-ipc.c ipc_base = ioremap(adev->res.start, resource_size(&adev->res)); start 505 drivers/mailbox/zynqmp-ipi-mailbox.c mchan->req_buf = devm_ioremap(mdev, res.start, start 521 drivers/mailbox/zynqmp-ipi-mailbox.c mchan->resp_buf = devm_ioremap(mdev, res.start, start 544 drivers/mailbox/zynqmp-ipi-mailbox.c mchan->req_buf = devm_ioremap(mdev, res.start, start 560 drivers/mailbox/zynqmp-ipi-mailbox.c mchan->resp_buf = devm_ioremap(mdev, res.start, start 451 drivers/mcb/mcb-core.c mem = request_mem_region(dev->mem.start, size, name); start 470 drivers/mcb/mcb-core.c release_mem_region(mem->start, size); start 480 drivers/mcb/mcb-core.c return irq->start; start 38 drivers/mcb/mcb-lpc.c res = devm_request_mem_region(&pdev->dev, priv->mem->start, start 46 drivers/mcb/mcb-lpc.c priv->base = devm_ioremap(&pdev->dev, priv->mem->start, start 59 drivers/mcb/mcb-lpc.c ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base); start 109 drivers/mcb/mcb-lpc.c .start = 0xe000e000, start 115 drivers/mcb/mcb-lpc.c .start = 0xf000e000, start 93 drivers/mcb/mcb-parse.c mdev->irq.start = GDD_IRQ(reg1); start 97 drivers/mcb/mcb-parse.c mdev->mem.start = dev_mapbase + offset; start 99 drivers/mcb/mcb-parse.c mdev->mem.end = mdev->mem.start + size - 1; start 240 drivers/md/bcache/bcache.h struct bkey start; start 25 drivers/md/bcache/bset.c for (k = i->start; k < bset_bkey_last(i); k = next) { start 473 drivers/md/bcache/bset.c ktime_t start = ktime_get(); start 484 drivers/md/bcache/bset.c done / ktime_us_delta(ktime_get(), start)); start 600 drivers/md/bcache/bset.c ? 
t->data->start start 662 drivers/md/bcache/bset.c t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start); start 695 drivers/md/bcache/bset.c struct bkey *prev = NULL, *k = t->data->start; start 755 drivers/md/bcache/bset.c if (k == t->data->start) start 1012 drivers/md/bcache/bset.c l = t->data->start; start 1039 drivers/md/bcache/bset.c i.l = t->data->start; start 1052 drivers/md/bcache/bset.c if (unlikely(bkey_cmp(search, t->data->start) < 0)) start 1053 drivers/md/bcache/bset.c return t->data->start; start 1065 drivers/md/bcache/bset.c i.l != t->data->start && start 1110 drivers/md/bcache/bset.c struct bset_tree *start) start 1121 drivers/md/bcache/bset.c for (; start <= bset_tree_last(b); start++) { start 1122 drivers/md/bcache/bset.c ret = bch_bset_search(b, start, search); start 1123 drivers/md/bcache/bset.c bch_btree_iter_push(iter, ret, bset_bkey_last(start->data)); start 1229 drivers/md/bcache/bset.c last = out->start; start 1243 drivers/md/bcache/bset.c unsigned int start, unsigned int order, bool fixup, start 1264 drivers/md/bcache/bset.c b->nsets = start; start 1266 drivers/md/bcache/bset.c if (!start && order == b->page_order) { start 1278 drivers/md/bcache/bset.c b->set[start].data->keys = out->keys; start 1279 drivers/md/bcache/bset.c memcpy(b->set[start].data->start, out->start, start 1280 drivers/md/bcache/bset.c (void *) bset_bkey_last(out) - (void *) out->start); start 1290 drivers/md/bcache/bset.c if (!start) start 1294 drivers/md/bcache/bset.c void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, start 1301 drivers/md/bcache/bset.c __bch_btree_iter_init(b, &iter, NULL, &b->set[start]); start 1303 drivers/md/bcache/bset.c if (start) { start 1306 drivers/md/bcache/bset.c for (i = start; i <= b->nsets; i++) start 1312 drivers/md/bcache/bset.c __btree_sort(b, &iter, start, order, false, state); start 246 drivers/md/bcache/bset.h return !b->last_set_unwritten || k < b->set[b->nsets].data->start; start 350 drivers/md/bcache/bset.h return search ? 
__bch_bset_search(b, t, search) : t->data->start; start 381 drivers/md/bcache/bset.h void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, start 405 drivers/md/bcache/bset.h return bkey_idx(i->start, idx); start 258 drivers/md/bcache/btree.c bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); start 1422 drivers/md/bcache/btree.c for (k = n2->start; start 1459 drivers/md/bcache/btree.c n2->start, start 1460 drivers/md/bcache/btree.c (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); start 1465 drivers/md/bcache/btree.c memmove(n2->start, start 2140 drivers/md/bcache/btree.c memcpy(btree_bset_first(n2)->start, start 2526 drivers/md/bcache/btree.c struct bkey start = buf->last_scanned; start 2541 drivers/md/bcache/btree.c KEY_INODE(&start), KEY_OFFSET(&start), start 2551 drivers/md/bcache/btree.c buf->start = START_KEY(&w->key); start 2556 drivers/md/bcache/btree.c buf->start = MAX_KEY; start 2576 drivers/md/bcache/btree.c bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, start 2582 drivers/md/bcache/btree.c s.key = *start; start 2584 drivers/md/bcache/btree.c if (bkey_cmp(end, &buf->start) <= 0 || start 2585 drivers/md/bcache/btree.c bkey_cmp(start, &buf->end) >= 0) start 325 drivers/md/bcache/btree.h bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, start 24 drivers/md/bcache/debug.c #define for_each_written_bset(b, start, i) \ start 25 drivers/md/bcache/debug.c for (i = (start); \ start 26 drivers/md/bcache/debug.c (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\ start 27 drivers/md/bcache/debug.c i->seq == (start)->seq; \ start 68 drivers/md/bcache/debug.c memcmp(inmemory->start, start 69 drivers/md/bcache/debug.c sorted->start, start 71 drivers/md/bcache/debug.c (void *) inmemory->start)) { start 331 drivers/md/bcache/journal.c for (k = i->j.start; start 365 drivers/md/bcache/journal.c uint64_t start = i->j.last_seq, end = i->j.seq, n = start; start 372 drivers/md/bcache/journal.c if (n == start && is_discard_enabled(s)) start 374 drivers/md/bcache/journal.c n, i->j.seq - 1, start, end); start 377 drivers/md/bcache/journal.c n, i->j.seq - 1, start, end); start 383 drivers/md/bcache/journal.c for (k = i->j.start; start 987 drivers/md/bcache/request.c struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); start 990 drivers/md/bcache/request.c bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); start 993 drivers/md/bcache/request.c if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) { start 2544 drivers/md/bcache/super.c unsigned long start = jiffies; start 2600 drivers/md/bcache/super.c long timeout = start + 10 * HZ - jiffies; start 240 drivers/md/bcache/util.c goto start; start 244 drivers/md/bcache/util.c start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, start 619 drivers/md/bcache/writeback.c struct bkey start = KEY(dc->disk.id, 0, 0); start 628 drivers/md/bcache/writeback.c if (bkey_cmp(&buf->last_scanned, &start) < 0 || start 630 drivers/md/bcache/writeback.c buf->last_scanned = start; start 648 drivers/md/bcache/writeback.c buf->last_scanned = start; start 759 drivers/md/bcache/writeback.c struct bkey start; start 777 drivers/md/bcache/writeback.c bkey_copy_key(&op->start, k); start 792 drivers/md/bcache/writeback.c op.start = KEY(op.inode, 0, 0); start 795 drivers/md/bcache/writeback.c ret = bch_btree_map_keys(&op.op, d->c, &op.start, start 106 drivers/md/dm-bufio.c sector_t start; start 646 drivers/md/dm-bufio.c sector += b->c->start; start 
1180 drivers/md/dm-bufio.c unsigned start, unsigned end) start 1184 drivers/md/dm-bufio.c BUG_ON(start >= end); start 1192 drivers/md/dm-bufio.c b->dirty_start = start; start 1196 drivers/md/dm-bufio.c if (start < b->dirty_start) start 1197 drivers/md/dm-bufio.c b->dirty_start = start; start 1769 drivers/md/dm-bufio.c void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) start 1771 drivers/md/dm-bufio.c c->start = start; start 647 drivers/md/dm-clone-metadata.c unsigned long start, unsigned long nr_regions) start 654 drivers/md/dm-clone-metadata.c bit = find_next_zero_bit(cmd->region_map, cmd->nr_regions, start); start 656 drivers/md/dm-clone-metadata.c return (bit >= (start + nr_regions)); start 665 drivers/md/dm-clone-metadata.c unsigned long start) start 667 drivers/md/dm-clone-metadata.c return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start); start 881 drivers/md/dm-clone-metadata.c int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, start 888 drivers/md/dm-clone-metadata.c if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start || start 889 drivers/md/dm-clone-metadata.c (start + nr_regions) > cmd->nr_regions)) { start 891 drivers/md/dm-clone-metadata.c start, nr_regions, cmd->nr_regions); start 903 drivers/md/dm-clone-metadata.c for (region_nr = start; region_nr < (start + nr_regions); region_nr++) { start 51 drivers/md/dm-clone-metadata.h int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, start 154 drivers/md/dm-clone-metadata.h unsigned long start, unsigned long nr_regions); start 165 drivers/md/dm-clone-metadata.h unsigned long start); start 135 drivers/md/dm-crypt.c sector_t start; start 835 drivers/md/dm-crypt.c bip->bip_iter.bi_sector = io->cc->start + io->sector; start 1472 drivers/md/dm-crypt.c clone->bi_iter.bi_sector = cc->start + io->sector; start 1581 drivers/md/dm-crypt.c clone->bi_iter.bi_sector = cc->start + io->sector; start 2235 drivers/md/dm-crypt.c char *start, *end, *mac_alg = NULL; start 2241 drivers/md/dm-crypt.c start = strchr(cipher_api, '('); start 2243 drivers/md/dm-crypt.c if (!start || !end || ++start > end) start 2246 drivers/md/dm-crypt.c mac_alg = kzalloc(end - start + 1, GFP_KERNEL); start 2249 drivers/md/dm-crypt.c strncpy(mac_alg, start, end - start); start 2687 drivers/md/dm-crypt.c cc->start = tmpll; start 2762 drivers/md/dm-crypt.c bio->bi_iter.bi_sector = cc->start + start 2839 drivers/md/dm-crypt.c cc->dev->name, (unsigned long long)cc->start); start 2944 drivers/md/dm-crypt.c return fn(ti, cc->dev, cc->start, ti->len, data); start 22 drivers/md/dm-delay.c sector_t start; start 149 drivers/md/dm-delay.c c->start = tmpll; start 299 drivers/md/dm-delay.c bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector); start 305 drivers/md/dm-delay.c DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay) start 338 drivers/md/dm-delay.c ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data); start 341 drivers/md/dm-delay.c ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data); start 344 drivers/md/dm-delay.c ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data); start 30 drivers/md/dm-dust.c sector_t start; start 215 drivers/md/dm-dust.c bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector); start 335 drivers/md/dm-dust.c dd->start = tmp; start 457 drivers/md/dm-dust.c (unsigned long long)dd->start, dd->blksz); start 472 drivers/md/dm-dust.c if (dd->start || 
start 484 drivers/md/dm-dust.c return fn(ti, dd->dev, dd->start, ti->len, data); start 28 drivers/md/dm-flakey.c sector_t start; start 220 drivers/md/dm-flakey.c fc->start = tmpll; start 275 drivers/md/dm-flakey.c return fc->start + dm_target_offset(ti, bi_sector); start 425 drivers/md/dm-flakey.c (unsigned long long)fc->start, fc->up_interval, start 456 drivers/md/dm-flakey.c if (fc->start || start 476 drivers/md/dm-flakey.c dm_remap_zone_report(ti, fc->start, zones, nr_zones); start 485 drivers/md/dm-flakey.c return fn(ti, fc->dev, fc->start, ti->len, data); start 156 drivers/md/dm-integrity.c sector_t start; start 453 drivers/md/dm-integrity.c result += ic->start; start 486 drivers/md/dm-integrity.c io_loc.sector = ic->start; start 968 drivers/md/dm-integrity.c io_loc.sector = ic->start + SB_SECTORS + sector; start 2949 drivers/md/dm-integrity.c DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, start 2991 drivers/md/dm-integrity.c return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); start 3045 drivers/md/dm-integrity.c if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) start 3223 drivers/md/dm-integrity.c unsigned start = 0, end = PAGE_SIZE; start 3225 drivers/md/dm-integrity.c start = start_offset; start 3228 drivers/md/dm-integrity.c sg_set_buf(&s[idx - start_index], va + start, end - start); start 3581 drivers/md/dm-integrity.c unsigned long long start; start 3618 drivers/md/dm-integrity.c if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { start 3623 drivers/md/dm-integrity.c ic->start = start; start 4020 drivers/md/dm-integrity.c dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); start 994 drivers/md/dm-ioctl.c geometry.start = indata[3]; start 23 drivers/md/dm-linear.c sector_t start; start 52 drivers/md/dm-linear.c lc->start = tmp; start 85 drivers/md/dm-linear.c return lc->start + dm_target_offset(ti, bi_sector); start 117 drivers/md/dm-linear.c (unsigned long long)lc->start); start 132 drivers/md/dm-linear.c if (lc->start || start 152 drivers/md/dm-linear.c dm_remap_zone_report(ti, lc->start, zones, nr_zones); start 162 drivers/md/dm-linear.c return fn(ti, lc->dev, lc->start, ti->len, data); start 47 drivers/md/dm-stats.c sector_t start; start 221 drivers/md/dm-stats.c (unsigned long long)s->start, start 234 drivers/md/dm-stats.c static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, start 255 drivers/md/dm-stats.c if (end < start || !step) start 258 drivers/md/dm-stats.c n_entries = end - start; start 287 drivers/md/dm-stats.c s->start = start; start 457 drivers/md/dm-stats.c len = s->end - s->start; start 459 drivers/md/dm-stats.c (unsigned long long)s->start, start 592 drivers/md/dm-stats.c if (end_sector <= s->start || bi_sector >= s->end) start 594 drivers/md/dm-stats.c if (unlikely(bi_sector < s->start)) { start 596 drivers/md/dm-stats.c todo = end_sector - s->start; start 598 drivers/md/dm-stats.c rel_sector = bi_sector - s->start; start 806 drivers/md/dm-stats.c sector_t start, end, step; start 832 drivers/md/dm-stats.c start = s->start + (step * idx_start); start 834 drivers/md/dm-stats.c for (x = idx_start; x < idx_end; x++, start = end) { start 836 drivers/md/dm-stats.c end = start + step; start 843 drivers/md/dm-stats.c (unsigned long long)start, start 951 drivers/md/dm-stats.c unsigned long long start, end, len, step; start 977 drivers/md/dm-stats.c start = 0; start 981 drivers/md/dm-stats.c } else if 
(sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 || start 982 drivers/md/dm-stats.c start != (sector_t)start || len != (sector_t)len) start 985 drivers/md/dm-stats.c end = start + len; start 986 drivers/md/dm-stats.c if (start >= end) start 993 drivers/md/dm-stats.c step = end - start; start 1049 drivers/md/dm-stats.c id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags, start 77 drivers/md/dm-stripe.c unsigned long long start; start 81 drivers/md/dm-stripe.c if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1) start 89 drivers/md/dm-stripe.c sc->stripe[stripe].physical_start = start; start 32 drivers/md/dm-switch.c sector_t start; start 207 drivers/md/dm-switch.c unsigned long long start; start 217 drivers/md/dm-switch.c if (kstrtoull(dm_shift_arg(as), 10, &start) || start != (sector_t)start) { start 223 drivers/md/dm-switch.c sctx->path_list[sctx->nr_paths].start = start; start 326 drivers/md/dm-switch.c bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; start 505 drivers/md/dm-switch.c (unsigned long long)sctx->path_list[path_nr].start); start 527 drivers/md/dm-switch.c if (ti->len + sctx->path_list[path_nr].start != start 542 drivers/md/dm-switch.c sctx->path_list[path_nr].start, ti->len, data); start 280 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 301 drivers/md/dm-table.c (unsigned long long)start, start 310 drivers/md/dm-table.c if ((start >= dev_size) || (start + len > dev_size)) { start 314 drivers/md/dm-table.c (unsigned long long)start, start 327 drivers/md/dm-table.c if (start & (zone_sectors - 1)) { start 330 drivers/md/dm-table.c (unsigned long long)start, start 356 drivers/md/dm-table.c if (start & (logical_block_size_sectors - 1)) { start 360 drivers/md/dm-table.c (unsigned long long)start, start 468 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 481 drivers/md/dm-table.c if (bdev_stack_limits(limits, bdev, start) < 0) start 489 drivers/md/dm-table.c (unsigned long long) start << SECTOR_SHIFT); start 576 drivers/md/dm-table.c char *start, *end = input, *out, **argv = NULL; start 592 drivers/md/dm-table.c start = skip_spaces(end); start 594 drivers/md/dm-table.c if (!*start) start 598 drivers/md/dm-table.c end = out = start; start 626 drivers/md/dm-table.c argv[*argc] = start; start 709 drivers/md/dm-table.c sector_t start, sector_t len, char *params) start 767 drivers/md/dm-table.c tgt->begin = start; start 883 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 888 drivers/md/dm-table.c start, len); start 893 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 927 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1384 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1420 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1450 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1632 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1669 drivers/md/dm-table.c struct dm_dev *dev, sector_t start, start 1700 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1708 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1733 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1747 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1774 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1801 drivers/md/dm-table.c sector_t start, sector_t len, void *data) start 1834 
drivers/md/dm-table.c struct dm_dev *dev, sector_t start, start 1862 drivers/md/dm-table.c struct dm_dev *dev, sector_t start, start 42 drivers/md/dm-unstripe.c unsigned long long start; start 81 drivers/md/dm-unstripe.c if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { start 85 drivers/md/dm-unstripe.c uc->physical_start = start; start 71 drivers/md/dm-verity-fec.c res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf); start 75 drivers/md/dm-verity-fec.c (unsigned long long)(v->fec->start + block), start 538 drivers/md/dm-verity-fec.c (unsigned long long)v->fec->start, start 633 drivers/md/dm-verity-fec.c v->fec->start = num_ll; start 755 drivers/md/dm-verity-fec.c ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) { start 39 drivers/md/dm-verity-fec.h sector_t start; /* parity data start in blocks */ start 471 drivers/md/dm-verity-target.c struct bvec_iter start; start 509 drivers/md/dm-verity-target.c start = io->iter; start 526 drivers/md/dm-verity-target.c cur_block, NULL, &start) == 0) start 790 drivers/md/dm-writecache.c static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end) start 795 drivers/md/dm-writecache.c e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ); start 1099 drivers/md/dm-zoned-metadata.c if (blkz->start + blkz->len == dev->capacity) start 1124 drivers/md/dm-zoned-metadata.c zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start); start 1270 drivers/md/dm-zoned-metadata.c zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start); start 347 drivers/md/dm-zoned-reclaim.c unsigned long start; start 355 drivers/md/dm-zoned-reclaim.c start = jiffies; start 411 drivers/md/dm-zoned-reclaim.c dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); start 875 drivers/md/dm.c if (geo->start > sz) { start 1109 drivers/md/dm.c int blocksize, sector_t start, sector_t len) start 1223 drivers/md/dm.c void dm_remap_zone_report(struct dm_target *ti, sector_t start, start 1239 drivers/md/dm.c if (zone->start >= start + ti->len) { start 1244 drivers/md/dm.c zone->start = zone->start + ti->begin - start; start 1249 drivers/md/dm.c zone->wp = zone->start + zone->len; start 1251 drivers/md/dm.c zone->wp = zone->start; start 1253 drivers/md/dm.c zone->wp = zone->wp + ti->begin - start; start 3085 drivers/md/dm.c sector_t start, sector_t len, void *data) start 78 drivers/md/dm.h sector_t start, sector_t len, void *data); start 1059 drivers/md/md-bitmap.c static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) start 1081 drivers/md/md-bitmap.c >= start); start 1162 drivers/md/md-bitmap.c >= start); start 1902 drivers/md/md-bitmap.c sector_t start = 0; start 1932 drivers/md/md-bitmap.c start = mddev->recovery_cp; start 1935 drivers/md/md-bitmap.c err = md_bitmap_init_from_disk(bitmap, start); start 2200 drivers/md/md-bitmap.c sector_t start = block >> chunkshift; start 2201 drivers/md/md-bitmap.c start <<= chunkshift; start 2202 drivers/md/md-bitmap.c while (start < end) { start 2204 drivers/md/md-bitmap.c start += 1 << chunkshift; start 100 drivers/md/md-faulty.c static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir) start 105 drivers/md/md-faulty.c if (conf->faults[i] >= start && start 124 drivers/md/md-faulty.c static void add_sector(struct faulty_conf *conf, sector_t start, int mode) start 129 drivers/md/md-faulty.c if (conf->faults[i] == start) { start 158 drivers/md/md-faulty.c conf->faults[n] = start; start 561 drivers/md/md.c ktime_t 
start = ktime_get_boottime(); start 565 drivers/md/md.c ktime_after(mddev->last_flush, start), start 567 drivers/md/md.c if (!ktime_after(mddev->last_flush, start)) { start 675 drivers/md/md.c int start = next_minor; start 683 drivers/md/md.c if (next_minor == start) { start 5901 drivers/md/md.c if (mddev->pers->start) { start 5904 drivers/md/md.c ret = mddev->pers->start(mddev); start 8100 drivers/md/md.c .start = md_seq_start, start 560 drivers/md/md.h int (*start)(struct mddev *mddev); start 481 drivers/md/raid0.c sector_t start = bio->bi_iter.bi_sector; start 491 drivers/md/raid0.c zone = find_zone(conf, &start); start 510 drivers/md/raid0.c first_stripe_index = start; start 515 drivers/md/raid0.c start_disk_index = (int)(start - first_stripe_index * stripe_size) / start 517 drivers/md/raid0.c start_disk_offset = ((int)(start - first_stripe_index * stripe_size) % start 2010 drivers/md/raid1.c int start; start 2066 drivers/md/raid1.c start = d; start 2082 drivers/md/raid1.c d = start; start 2252 drivers/md/raid1.c int start; start 2293 drivers/md/raid1.c start = d; start 2310 drivers/md/raid1.c d = start; start 2368 drivers/md/raid10.c int start; start 2424 drivers/md/raid10.c start = sl; start 2462 drivers/md/raid10.c sl = start; start 258 drivers/md/raid5-cache.c static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) start 260 drivers/md/raid5-cache.c start += inc; start 261 drivers/md/raid5-cache.c if (start >= log->device_size) start 262 drivers/md/raid5-cache.c start = start - log->device_size; start 263 drivers/md/raid5-cache.c return start; start 266 drivers/md/raid5-cache.c static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, start 269 drivers/md/raid5-cache.c if (end >= start) start 270 drivers/md/raid5-cache.c return end - start; start 272 drivers/md/raid5-cache.c return end + log->device_size - start; start 8416 drivers/md/raid5.c .start = raid5_start, start 8441 drivers/md/raid5.c .start = raid5_start, start 8467 drivers/md/raid5.c .start = raid5_start, start 12 drivers/media/common/btcx-risc.h int start; start 558 drivers/media/common/saa7146/saa7146_fops.c vbi->start[0] = 5; start 560 drivers/media/common/saa7146/saa7146_fops.c vbi->start[1] = 312; start 441 drivers/media/common/tveeprom.c int i, j, len, done, beenhere, tag, start; start 462 drivers/media/common/tveeprom.c start = 0xa0; /* Generic em28xx offset */ start 467 drivers/media/common/tveeprom.c start = 8; /* Generic cx2388x offset */ start 472 drivers/media/common/tveeprom.c start = 8; /* Generic cx23418 offset (models 74xxx) */ start 474 drivers/media/common/tveeprom.c start = 0; start 476 drivers/media/common/tveeprom.c for (i = start; !done && i < 256; i += len) { start 36 drivers/media/common/videobuf2/videobuf2-memops.c struct frame_vector *vb2_create_framevec(unsigned long start, start 45 drivers/media/common/videobuf2/videobuf2-memops.c first = start >> PAGE_SHIFT; start 46 drivers/media/common/videobuf2/videobuf2-memops.c last = (start + length - 1) >> PAGE_SHIFT; start 51 drivers/media/common/videobuf2/videobuf2-memops.c ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec); start 276 drivers/media/dvb-core/dvb_ca_en50221.c unsigned long start; start 281 drivers/media/dvb-core/dvb_ca_en50221.c start = jiffies; start 294 drivers/media/dvb-core/dvb_ca_en50221.c __func__, jiffies - start); start 306 drivers/media/dvb-core/dvb_ca_en50221.c dprintk("%s failed timeout:%lu\n", __func__, jiffies - start); start 525 drivers/media/dvb-core/dvb_demux.c int start = 
pos, lost; start 534 drivers/media/dvb-core/dvb_demux.c lost = pos - start; start 676 drivers/media/dvb-frontends/cx24123.c static int cx24123_repeater_mode(struct cx24123_state *state, u8 mode, u8 start) start 680 drivers/media/dvb-frontends/cx24123.c r |= (1 << 6) | (start << 5); start 682 drivers/media/dvb-frontends/cx24123.c r |= (1 << 7) | (start); start 20 drivers/media/dvb-frontends/cxd2880/cxd2880_integ.c ktime_t start; start 30 drivers/media/dvb-frontends/cxd2880/cxd2880_integ.c start = ktime_get(); start 42 drivers/media/dvb-frontends/cxd2880/cxd2880_integ.c if (ktime_to_ms(ktime_sub(ktime_get(), start)) > start 6388 drivers/media/dvb-frontends/drxk_hard.c start(state, 0, IF); start 368 drivers/media/dvb-frontends/or51132.c start: start 386 drivers/media/dvb-frontends/or51132.c goto start; start 465 drivers/media/dvb-frontends/or51132.c start: start 494 drivers/media/dvb-frontends/or51132.c if (retry--) goto start; start 334 drivers/media/dvb-frontends/si21xx.c unsigned long start = jiffies; start 339 drivers/media/dvb-frontends/si21xx.c if (jiffies - start > timeout) { start 671 drivers/media/dvb-frontends/stb0899_drv.c unsigned long start = jiffies; start 677 drivers/media/dvb-frontends/stb0899_drv.c if (time_after(jiffies, start + timeout)) { start 715 drivers/media/dvb-frontends/stb0899_drv.c unsigned long start = jiffies; start 719 drivers/media/dvb-frontends/stb0899_drv.c if (time_after(jiffies, start + timeout)) { start 764 drivers/media/dvb-frontends/stb0899_drv.c unsigned long start = jiffies; start 768 drivers/media/dvb-frontends/stb0899_drv.c if (time_after(jiffies, start + timeout)) { start 173 drivers/media/dvb-frontends/stb6100.c static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len) start 191 drivers/media/dvb-frontends/stb6100.c if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) { start 193 drivers/media/dvb-frontends/stb6100.c start, len); start 197 drivers/media/dvb-frontends/stb6100.c cmdbuf[0] = start; start 202 drivers/media/dvb-frontends/stb6100.c dprintk(verbose, FE_DEBUG, 1, " Write @ 0x%02x: [%d:%d]", state->config->tuner_address, start, len); start 204 drivers/media/dvb-frontends/stb6100.c dprintk(verbose, FE_DEBUG, 1, " %s: 0x%02x", stb6100_regnames[start + i], buf[i]); start 209 drivers/media/dvb-frontends/stb6100.c (unsigned int)state->config->tuner_address, start, len, rc); start 181 drivers/media/dvb-frontends/stv0299.c unsigned long start = jiffies; start 186 drivers/media/dvb-frontends/stv0299.c if (jiffies - start > timeout) { start 198 drivers/media/dvb-frontends/stv0299.c unsigned long start = jiffies; start 203 drivers/media/dvb-frontends/stv0299.c if (jiffies - start > timeout) { start 1295 drivers/media/dvb-frontends/stv0910.c stat = start(state, p); start 55 drivers/media/dvb-frontends/stv6110.c int start, int len) start 76 drivers/media/dvb-frontends/stv6110.c if (start + len > 8) start 80 drivers/media/dvb-frontends/stv6110.c cmdbuf[0] = start; start 96 drivers/media/dvb-frontends/stv6110.c int start, int len) start 100 drivers/media/dvb-frontends/stv6110.c u8 reg[] = { start }; start 125 drivers/media/dvb-frontends/stv6110.c memcpy(&priv->regs[start], regs, len); start 130 drivers/media/dvb-frontends/stv6110.c static int stv6110_read_reg(struct dvb_frontend *fe, int start) start 133 drivers/media/dvb-frontends/stv6110.c stv6110_read_regs(fe, buf, start, 1); start 51 drivers/media/dvb-frontends/stv6110x.c static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 data[], int 
len) start 71 drivers/media/dvb-frontends/stv6110x.c if (start + len > 8) start 74 drivers/media/dvb-frontends/stv6110x.c buf[0] = start; start 163 drivers/media/dvb-frontends/tda8083.c unsigned long start = jiffies; start 165 drivers/media/dvb-frontends/tda8083.c while (jiffies - start < timeout && start 237 drivers/media/firewire/firedtv-fw.c .start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE, start 63 drivers/media/i2c/ir-kbd-i2c.c int start, range, toggle, dev, code, ircode, vendor; start 73 drivers/media/i2c/ir-kbd-i2c.c start = (buf[offset] >> 7) & 1; start 84 drivers/media/i2c/ir-kbd-i2c.c if (!start) start 89 drivers/media/i2c/ir-kbd-i2c.c ircode = (start << 12) | (toggle << 11) | (dev << 6) | code; start 98 drivers/media/i2c/ir-kbd-i2c.c start, range, toggle, dev, code); start 241 drivers/media/i2c/s5c73m3/s5c73m3-core.c unsigned long start = jiffies; start 242 drivers/media/i2c/s5c73m3/s5c73m3-core.c unsigned long end = start + msecs_to_jiffies(2000); start 258 drivers/media/i2c/s5c73m3/s5c73m3-core.c jiffies_to_msecs(jiffies - start)); start 304 drivers/media/i2c/s5c73m3/s5c73m3-core.c unsigned long start = jiffies; start 337 drivers/media/i2c/s5c73m3/s5c73m3-core.c msg, jiffies_to_msecs(jiffies - start)); start 1421 drivers/media/i2c/s5k5baf.c static void s5k5baf_bound_range(u32 *start, u32 *len, u32 max) start 1425 drivers/media/i2c/s5k5baf.c if (*start + *len > max) start 1426 drivers/media/i2c/s5k5baf.c *start = max - *len; start 1427 drivers/media/i2c/s5k5baf.c *start &= ~1; start 205 drivers/media/pci/bt8xx/btcx-risc.c skips[skip].start = clips[clip].c.left; start 206 drivers/media/pci/bt8xx/btcx-risc.c if (skips[skip].start < 0) start 207 drivers/media/pci/bt8xx/btcx-risc.c skips[skip].start = 0; start 227 drivers/media/pci/bt8xx/btcx-risc.c pr_cont(" %d-%d", skips[skip].start, skips[skip].end); start 10 drivers/media/pci/bt8xx/btcx-risc.h int start; start 239 drivers/media/pci/bt8xx/bttv-risc.c int dwords, rc, line, maxy, start, end; start 287 drivers/media/pci/bt8xx/bttv-risc.c for (start = 0, skip = 0; start < ov->w.width; start = end) { start 291 drivers/media/pci/bt8xx/bttv-risc.c } else if (start < skips[skip].start) { start 293 drivers/media/pci/bt8xx/bttv-risc.c end = skips[skip].start; start 300 drivers/media/pci/bt8xx/bttv-risc.c ra = addr + (fmt->depth>>3)*start; start 304 drivers/media/pci/bt8xx/bttv-risc.c if (0 == start) start 308 drivers/media/pci/bt8xx/bttv-risc.c ri |= (fmt->depth>>3) * (end-start); start 83 drivers/media/pci/bt8xx/bttv-vbi.c fh->vbi_fmt.fmt.start[0], start 84 drivers/media/pci/bt8xx/bttv-vbi.c fh->vbi_fmt.fmt.start[1], start 120 drivers/media/pci/bt8xx/bttv-vbi.c skip_lines0 = max(0, (fh->vbi_fmt.fmt.start[0] start 123 drivers/media/pci/bt8xx/bttv-vbi.c skip_lines1 = max(0, (fh->vbi_fmt.fmt.start[1] start 269 drivers/media/pci/bt8xx/bttv-vbi.c s64 start, count; start 271 drivers/media/pci/bt8xx/bttv-vbi.c start = clamp(f->start[i], min_start, max_start); start 273 drivers/media/pci/bt8xx/bttv-vbi.c count = (s64) f->start[i] + f->count[i] - start; start 274 drivers/media/pci/bt8xx/bttv-vbi.c f->start[i] = start; start 276 drivers/media/pci/bt8xx/bttv-vbi.c max_end - start); start 286 drivers/media/pci/bt8xx/bttv-vbi.c f->start[0] = tvnorm->vbistart[0]; start 287 drivers/media/pci/bt8xx/bttv-vbi.c f->start[1] = tvnorm->vbistart[1]; start 338 drivers/media/pci/bt8xx/bttv-vbi.c start1 = frt->fmt.vbi.start[1] - tvnorm->vbistart[1] + start 347 drivers/media/pci/bt8xx/bttv-vbi.c end = max(frt->fmt.vbi.start[0], start1) * 2 + 2; start 391 
drivers/media/pci/bt8xx/bttv-vbi.c new_start = frt->fmt.vbi.start[i] start 395 drivers/media/pci/bt8xx/bttv-vbi.c frt->fmt.vbi.start[i] = min(new_start, max_end - 1); start 398 drivers/media/pci/bt8xx/bttv-vbi.c max_end - frt->fmt.vbi.start[i]); start 419 drivers/media/pci/bt8xx/bttv-vbi.c f->fmt.start[0] = tvnorm->vbistart[0]; start 420 drivers/media/pci/bt8xx/bttv-vbi.c f->fmt.start[1] = tvnorm->vbistart[1]; start 108 drivers/media/pci/cobalt/cobalt-i2c.c struct i2c_adapter *adap, bool start, bool stop, start 121 drivers/media/pci/cobalt/cobalt-i2c.c if (i == 0 && start != 0) { start 166 drivers/media/pci/cobalt/cobalt-i2c.c struct i2c_adapter *adap, bool start, bool stop, start 176 drivers/media/pci/cobalt/cobalt-i2c.c if (i == 0 && start != 0) { start 492 drivers/media/pci/cx18/cx18-driver.h u32 start[2]; /* First VBI data line per field: 10 & 273 or 6 & 318 */ start 289 drivers/media/pci/cx18/cx18-fileops.c const char *start = buf->buf + buf->readpos; start 290 drivers/media/pci/cx18/cx18-fileops.c const char *p = start + 1; start 295 drivers/media/pci/cx18/cx18-fileops.c while (start + len > p) { start 297 drivers/media/pci/cx18/cx18-fileops.c q = memchr(p, 0, start + len - p); start 344 drivers/media/pci/cx18/cx18-fileops.c len = (char *)q - start; start 168 drivers/media/pci/cx18/cx18-ioctl.c vbifmt->start[0] = cx->vbi.start[0]; start 169 drivers/media/pci/cx18/cx18-ioctl.c vbifmt->start[1] = cx->vbi.start[1]; start 592 drivers/media/pci/cx18/cx18-ioctl.c cx->vbi.start[0] = cx->is_50hz ? 6 : 10; start 593 drivers/media/pci/cx18/cx18-ioctl.c cx->vbi.start[1] = cx->is_50hz ? 318 : 273; start 546 drivers/media/pci/cx18/cx18-streams.c cx->vbi.start[0] = 10; start 547 drivers/media/pci/cx18/cx18-streams.c cx->vbi.start[1] = 273; start 550 drivers/media/pci/cx18/cx18-streams.c cx->vbi.start[0] = 6; start 551 drivers/media/pci/cx18/cx18-streams.c cx->vbi.start[1] = 318; start 48 drivers/media/pci/cx23885/cx23885-vbi.c f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9; start 49 drivers/media/pci/cx23885/cx23885-vbi.c f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9; start 54 drivers/media/pci/cx23885/cx23885-vbi.c f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5; start 55 drivers/media/pci/cx23885/cx23885-vbi.c f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5; start 35 drivers/media/pci/cx88/cx88-vbi.c f->fmt.vbi.start[0] = 10; start 36 drivers/media/pci/cx88/cx88-vbi.c f->fmt.vbi.start[1] = 273; start 43 drivers/media/pci/cx88/cx88-vbi.c f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5; start 44 drivers/media/pci/cx88/cx88-vbi.c f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5; start 399 drivers/media/pci/ddbridge/ddbridge-sx8.c stat = start(fe, 3, mask, ts_config); start 1909 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start) start 1914 drivers/media/pci/intel/ipu3/ipu3-cio2.c { 0, start - 1 }, start 1915 drivers/media/pci/intel/ipu3/ipu3-cio2.c { start, elems - 1 }, start 551 drivers/media/pci/ivtv/ivtv-driver.h u32 start[2]; /* start of first VBI line in the odd/even fields */ start 295 drivers/media/pci/ivtv/ivtv-fileops.c const char *start = buf->buf + buf->readpos; start 296 drivers/media/pci/ivtv/ivtv-fileops.c const char *p = start + 1; start 301 drivers/media/pci/ivtv/ivtv-fileops.c while (start + len > p && (q = memchr(p, 0, start + len - p))) { start 327 drivers/media/pci/ivtv/ivtv-fileops.c len = (char *)q - start; start 247 drivers/media/pci/ivtv/ivtv-ioctl.c dc->start.speed = 
ivtv_validate_speed(itv->speed, dc->start.speed); start 248 drivers/media/pci/ivtv/ivtv-ioctl.c if (dc->start.speed < 0) start 249 drivers/media/pci/ivtv/ivtv-ioctl.c dc->start.format = V4L2_DEC_START_FMT_GOP; start 251 drivers/media/pci/ivtv/ivtv-ioctl.c dc->start.format = V4L2_DEC_START_FMT_NONE; start 252 drivers/media/pci/ivtv/ivtv-ioctl.c if (dc->start.speed != 500 && dc->start.speed != 1500) start 253 drivers/media/pci/ivtv/ivtv-ioctl.c dc->flags = dc->start.speed == 1000 ? 0 : start 264 drivers/media/pci/ivtv/ivtv-ioctl.c return ivtv_start_decoding(id, dc->start.speed); start 368 drivers/media/pci/ivtv/ivtv-ioctl.c vbifmt->start[0] = itv->vbi.start[0]; start 369 drivers/media/pci/ivtv/ivtv-ioctl.c vbifmt->start[1] = itv->vbi.start[1]; start 1106 drivers/media/pci/ivtv/ivtv-ioctl.c itv->vbi.start[0] = itv->is_50hz ? 6 : 10; start 1107 drivers/media/pci/ivtv/ivtv-ioctl.c itv->vbi.start[1] = itv->is_50hz ? 318 : 273; start 181 drivers/media/pci/meye/meye.c static void ptable_copy(u8 *buf, int start, int size, int pt_pages) start 186 drivers/media/pci/meye/meye.c memcpy(buf + i, meye.mchip_ptable[start++], PAGE_SIZE); start 187 drivers/media/pci/meye/meye.c if (start >= pt_pages) start 188 drivers/media/pci/meye/meye.c start = 0; start 190 drivers/media/pci/meye/meye.c memcpy(buf + i, meye.mchip_ptable[start], size % PAGE_SIZE); start 1440 drivers/media/pci/meye/meye.c unsigned long start = vma->vm_start; start 1467 drivers/media/pci/meye/meye.c if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { start 1471 drivers/media/pci/meye/meye.c start += PAGE_SIZE; start 137 drivers/media/pci/ngene/ngene.h u64 start; start 338 drivers/media/pci/saa7134/saa7134-input.c unsigned int start = 0,parity = 0,code = 0; start 349 drivers/media/pci/saa7134/saa7134-input.c for (start = 0; start < ARRAY_SIZE(b); start++) { start 350 drivers/media/pci/saa7134/saa7134-input.c if (b[start] == marker) { start 351 drivers/media/pci/saa7134/saa7134-input.c code=b[(start+parity_offset + 1) % 4]; start 352 drivers/media/pci/saa7134/saa7134-input.c parity=b[(start+parity_offset) % 4]; start 1236 drivers/media/pci/saa7134/saa7134-video.c f->fmt.vbi.start[0] = norm->vbi_v_start_0; start 1238 drivers/media/pci/saa7134/saa7134-video.c f->fmt.vbi.start[1] = norm->vbi_v_start_1; start 405 drivers/media/pci/saa7164/saa7164-vbi.c f->fmt.vbi.start[0] = 10; start 407 drivers/media/pci/saa7164/saa7164-vbi.c f->fmt.vbi.start[1] = 263 + 10 + 1; start 505 drivers/media/pci/solo6x10/solo6x10-tw28.c int start, int n) start 507 drivers/media/pci/solo6x10/solo6x10-tw28.c for (; start < n; start++, vals++) { start 509 drivers/media/pci/solo6x10/solo6x10-tw28.c switch (start) { start 516 drivers/media/pci/solo6x10/solo6x10-tw28.c solo_i2c_writebyte(dev, SOLO_I2C_SAA, 0x46, start, *vals); start 291 drivers/media/pci/ttpci/av7110_hw.c unsigned long start; start 302 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 304 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_FREE); start 324 drivers/media/pci/ttpci/av7110_hw.c unsigned long start; start 337 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 339 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_FREE); start 354 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 356 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_SHAKE); start 394 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 396 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, 
start + ARM_WAIT_FREE); start 429 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 431 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_FREE); start 537 drivers/media/pci/ttpci/av7110_hw.c unsigned long start; start 558 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 560 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_FREE); start 574 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 576 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_SHAKE); start 718 drivers/media/pci/ttpci/av7110_hw.c unsigned long start; start 723 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 725 drivers/media/pci/ttpci/av7110_hw.c err = time_after(jiffies, start + ARM_WAIT_OSD); start 743 drivers/media/pci/ttpci/av7110_hw.c unsigned long start; start 750 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 752 drivers/media/pci/ttpci/av7110_hw.c ret = time_after(jiffies, start + ARM_WAIT_OSD); start 764 drivers/media/pci/ttpci/av7110_hw.c start = jiffies; start 766 drivers/media/pci/ttpci/av7110_hw.c ret = time_after(jiffies, start + ARM_WAIT_SHAKE); start 327 drivers/media/platform/coda/coda-bit.c u32 start; start 389 drivers/media/platform/coda/coda-bit.c start = ctx->bitstream_fifo.kfifo.in; start 403 drivers/media/platform/coda/coda-bit.c meta->start = start; start 153 drivers/media/platform/coda/coda.h unsigned int start; start 312 drivers/media/platform/coda/imx-vdoa.c ret = devm_request_threaded_irq(&pdev->dev, res->start, NULL, start 92 drivers/media/platform/coda/trace.h __field(int, start) start 100 drivers/media/platform/coda/trace.h __entry->start = meta->start & ctx->bitstream_fifo.kfifo.mask; start 106 drivers/media/platform/coda/trace.h __entry->minor, __entry->index, __entry->start, __entry->end, start 123 drivers/media/platform/coda/trace.h __field(int, start) start 130 drivers/media/platform/coda/trace.h __entry->start = meta ? 
(meta->start & start 138 drivers/media/platform/coda/trace.h __entry->minor, __entry->start, __entry->end, __entry->ctx) start 880 drivers/media/platform/davinci/dm355_ccdc.c res = request_mem_region(res->start, resource_size(res), res->name); start 886 drivers/media/platform/davinci/dm355_ccdc.c ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); start 909 drivers/media/platform/davinci/dm355_ccdc.c release_mem_region(res->start, resource_size(res)); start 922 drivers/media/platform/davinci/dm355_ccdc.c release_mem_region(res->start, resource_size(res)); start 814 drivers/media/platform/davinci/dm644x_ccdc.c res = request_mem_region(res->start, resource_size(res), res->name); start 820 drivers/media/platform/davinci/dm644x_ccdc.c ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); start 830 drivers/media/platform/davinci/dm644x_ccdc.c release_mem_region(res->start, resource_size(res)); start 843 drivers/media/platform/davinci/dm644x_ccdc.c release_mem_region(res->start, resource_size(res)); start 1042 drivers/media/platform/davinci/isif.c res = request_mem_region(res->start, resource_size(res), start 1048 drivers/media/platform/davinci/isif.c addr = ioremap_nocache(res->start, resource_size(res)); start 1075 drivers/media/platform/davinci/isif.c release_mem_region(res->start, resource_size(res)); start 1086 drivers/media/platform/davinci/isif.c release_mem_region(res->start, resource_size(res)); start 1104 drivers/media/platform/davinci/isif.c release_mem_region(res->start, resource_size(res)); start 1416 drivers/media/platform/davinci/vpbe_display.c irq = res->start; start 1554 drivers/media/platform/davinci/vpbe_osd.c osd->osd_base_phys = res->start; start 1726 drivers/media/platform/davinci/vpfe_capture.c vpfe_dev->ccdc_irq0 = res1->start; start 1736 drivers/media/platform/davinci/vpfe_capture.c vpfe_dev->ccdc_irq1 = res1->start; start 323 drivers/media/platform/davinci/vpif.c u8 start, end; start 326 drivers/media/platform/davinci/vpif.c start = channel_id; start 329 drivers/media/platform/davinci/vpif.c for (i = start; i < end; i++) { start 1649 drivers/media/platform/davinci/vpif_capture.c err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr, start 1277 drivers/media/platform/davinci/vpif_display.c err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr, start 1171 drivers/media/platform/exynos-gsc/gsc-core.c ret = devm_request_irq(dev, res->start, gsc_irq_handler, start 987 drivers/media/platform/exynos4-is/fimc-core.c ret = devm_request_irq(dev, res->start, fimc_irq_handler, start 1506 drivers/media/platform/exynos4-is/fimc-lite.c ret = devm_request_irq(dev, res->start, flite_irq_handler, start 1405 drivers/media/platform/fsl-viu.c if (!devm_request_mem_region(&op->dev, r.start, start 1413 drivers/media/platform/fsl-viu.c viu_regs = devm_ioremap(&op->dev, r.start, sizeof(struct viu_reg)); start 1118 drivers/media/platform/marvell-ccic/mcam-core.c int start; start 1121 drivers/media/platform/marvell-ccic/mcam-core.c start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers); start 1126 drivers/media/platform/marvell-ccic/mcam-core.c if (start) start 313 drivers/media/platform/marvell-ccic/mmp-driver.c cam->irq = res->start; start 2386 drivers/media/platform/omap3isp/isp.c mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST]; start 30 drivers/media/platform/omap3isp/isph3a_aewb.c u32 start; start 52 drivers/media/platform/omap3isp/isph3a_aewb.c start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT; start 53 
drivers/media/platform/omap3isp/isph3a_aewb.c start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT; start 64 drivers/media/platform/omap3isp/isph3a_aewb.c isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A, start 397 drivers/media/platform/omap3isp/ispvideo.c unsigned int start; start 423 drivers/media/platform/omap3isp/ispvideo.c start = isp_pipeline_ready(pipe); start 424 drivers/media/platform/omap3isp/ispvideo.c if (start) start 428 drivers/media/platform/omap3isp/ispvideo.c if (start) start 2464 drivers/media/platform/pxa_camera.c config.src_addr = pcdev->res->start + CIBR0 + i * 8; start 1125 drivers/media/platform/qcom/camss/camss-csid.c csid->irq = r->start; start 588 drivers/media/platform/qcom/camss/camss-csiphy.c csiphy->irq = r->start; start 1127 drivers/media/platform/qcom/camss/camss-ispif.c ispif->irq = r->start; start 2021 drivers/media/platform/qcom/camss/camss-vfe.c vfe->irq = r->start; start 99 drivers/media/platform/qcom/venus/firmware.c *mem_phys = r.start; start 107 drivers/media/platform/qcom/venus/firmware.c mem_va = memremap(r.start, *mem_size, MEMREMAP_WC); start 110 drivers/media/platform/qcom/venus/firmware.c &r.start, *mem_size); start 202 drivers/media/platform/rcar_drif.c resource_size_t start; /* I/O resource offset */ start 287 drivers/media/platform/rcar_drif.c dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR); start 1412 drivers/media/platform/rcar_drif.c ch->start = res->start; start 673 drivers/media/platform/s5p-g2d/g2d.c dev->irq = res->start; start 1298 drivers/media/platform/s5p-mfc/s5p_mfc.c dev->irq = res->start; start 43 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c unsigned int start, offset; start 48 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c start = bitmap_find_next_zero_area(dev->mem_bitmap, bits, 0, count, align); start 49 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c if (start > bits) start 52 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c bitmap_set(dev->mem_bitmap, start, count); start 53 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c offset = start << PAGE_SHIFT; start 102 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c unsigned int start = (b->dma - dev->mem_base) >> PAGE_SHIFT; start 105 drivers/media/platform/s5p-mfc/s5p_mfc_opr.c bitmap_clear(dev->mem_bitmap, start, count); start 1343 drivers/media/platform/sti/bdisp/bdisp-v4l2.c ret = devm_request_threaded_irq(dev, res->start, bdisp_irq_handler, start 491 drivers/media/platform/sti/hva/hva-h264.c const u8 start[] = { 0x00, 0x00, 0x00, 0x01 }; start 503 drivers/media/platform/sti/hva/hva-h264.c memcpy(addr + *size, start, sizeof(start)); start 504 drivers/media/platform/sti/hva/hva-h264.c *size += sizeof(start); start 524 drivers/media/platform/sti/hva/hva-h264.c const u8 start[] = { 0x00, 0x00, 0x00, 0x01 }; start 530 drivers/media/platform/sti/hva/hva-h264.c memcpy(addr + *size, start, sizeof(start)); start 531 drivers/media/platform/sti/hva/hva-h264.c *size += sizeof(start); start 322 drivers/media/platform/sti/hva/hva-hw.c hva->esram_addr = esram->start; start 313 drivers/media/platform/stm32/stm32-dcmi.c config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR; start 385 drivers/media/platform/stm32/stm32-dcmi.c u32 size, start; start 393 drivers/media/platform/stm32/stm32-dcmi.c start = ((dcmi->crop.top) << 16) | start 395 drivers/media/platform/stm32/stm32-dcmi.c reg_write(dcmi->regs, DCMI_CWSTRT, start); start 116 drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_reg.h #define CSI_CH_HSIZE_HOR_START(start) (((start) << 0) & CSI_CH_HSIZE_HOR_START_MASK) start 
122 drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_reg.h #define CSI_CH_VSIZE_VER_START(start) (((start) << 0) & CSI_CH_VSIZE_VER_START_MASK) start 342 drivers/media/platform/tegra-cec/tegra_cec.c if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), start 354 drivers/media/platform/tegra-cec/tegra_cec.c cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start, start 381 drivers/media/platform/ti-vpe/cal.c cm->res->name, &cm->res->start, &cm->res->end); start 453 drivers/media/platform/ti-vpe/cal.c cc->res->name, &cc->res->start, &cc->res->end); start 487 drivers/media/platform/ti-vpe/cal.c cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start); start 494 drivers/media/platform/ti-vpe/cal.c &dev->ctx[0]->cc->res->start); start 503 drivers/media/platform/ti-vpe/cal.c &dev->ctx[1]->cc->res->start); start 511 drivers/media/platform/ti-vpe/cal.c &dev->cm->res->start); start 1836 drivers/media/platform/ti-vpe/cal.c dev->res->name, &dev->res->start, &dev->res->end); start 97 drivers/media/platform/ti-vpe/csc.c dev_dbg(dev, "CSC Registers @ %pa:\n", &csc->res->start); start 28 drivers/media/platform/ti-vpe/sc.c dev_dbg(dev, "SC Registers @ %pa:\n", &sc->res->start); start 1152 drivers/media/platform/ti-vpe/vpdma.c vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 508 drivers/media/platform/ti-vpe/vpe.c ((obj)->res->start - ctx->dev->res->start + reg) start 2507 drivers/media/platform/ti-vpe/vpe.c dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K); start 454 drivers/media/platform/vim2m.c int start, end, step; start 483 drivers/media/platform/vim2m.c start = height - 1; start 487 drivers/media/platform/vim2m.c start = 0; start 500 drivers/media/platform/vim2m.c for (y = start; y != end; y += step, y_out++) { start 519 drivers/media/platform/vim2m.c for (y = start; y != end; y += step, y_out++) { start 74 drivers/media/platform/vivid/vivid-vbi-cap.c vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5; start 75 drivers/media/platform/vivid/vivid-vbi-cap.c vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5; start 123 drivers/media/platform/vivid/vivid-vbi-gen.c line -= vbi_fmt->start[data->field]; start 150 drivers/media/platform/vivid/vivid-vbi-out.c vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5; start 151 drivers/media/platform/vivid/vivid-vbi-out.c vbi->start[1] = is_60hz ? 
V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5; start 817 drivers/media/platform/vsp1/vsp1_drv.c ret = devm_request_irq(&pdev->dev, irq->start, vsp1_irq_handler, start 97 drivers/media/platform/xilinx/xilinx-dma.c static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start) start 118 drivers/media/platform/xilinx/xilinx-dma.c ret = v4l2_subdev_call(subdev, video, s_stream, start); start 119 drivers/media/platform/xilinx/xilinx-dma.c if (start && ret < 0 && ret != -ENOIOCTLCMD) start 175 drivers/media/platform/xilinx/xilinx-dma.c struct xvip_dma *start) start 178 drivers/media/platform/xilinx/xilinx-dma.c struct media_entity *entity = &start->video.entity; start 51 drivers/media/rc/igorplugusb.c unsigned i, start, overflow; start 62 drivers/media/rc/igorplugusb.c i = start = overflow + HEADERLEN; start 64 drivers/media/rc/igorplugusb.c if (start >= len) { start 79 drivers/media/rc/igorplugusb.c } while (i != start); start 68 drivers/media/rc/iguanair.c uint16_t start; start 218 drivers/media/rc/iguanair.c ir->packet->header.start = 0; start 264 drivers/media/rc/iguanair.c ir->packet->header.start = 0; start 358 drivers/media/rc/iguanair.c ir->packet->header.start = 0; start 14 drivers/media/rc/img-ir/img-ir-rc5.c unsigned int addr, cmd, tgl, start; start 19 drivers/media/rc/img-ir/img-ir-rc5.c start = (raw >> 13) & 0x01; start 29 drivers/media/rc/img-ir/img-ir-rc5.c if (!start) start 235 drivers/media/rc/lirc_dev.c ktime_t start; start 341 drivers/media/rc/lirc_dev.c start = ktime_get(); start 356 drivers/media/rc/lirc_dev.c towait = ktime_us_delta(ktime_add_us(start, duration), start 270 drivers/media/spi/cxd2880-spi.c ktime_t start; start 286 drivers/media/spi/cxd2880-spi.c start = ktime_get(); start 304 drivers/media/spi/cxd2880-spi.c start = ktime_get(); start 306 drivers/media/spi/cxd2880-spi.c (ktime_to_ms(ktime_sub(ktime_get(), start)) >= 500)) { start 313 drivers/media/spi/cxd2880-spi.c start = ktime_get(); start 1596 drivers/media/usb/au0828/au0828-video.c format->fmt.vbi.start[0] = 21; start 1597 drivers/media/usb/au0828/au0828-video.c format->fmt.vbi.start[1] = 284; start 238 drivers/media/usb/cpia2/cpia2.h u8 start; start 450 drivers/media/usb/cpia2/cpia2.h u8 request, u8 start, u8 count, u8 direction); start 146 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_DEVICE_HI; start 152 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_DESCRIP_VID_HI; start 156 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VC_ASIC_ID; start 160 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_SENSOR_FLAGS; start 164 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_DEVICEH; start 173 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP4_EXPOSURE_TARGET; start 175 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP5_EXPOSURE_TARGET; start 183 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_YRANGE; start 192 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_SATURATION; start 194 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP5_MCUVSATURATION; start 202 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_GPIO_DATA; start 210 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_GPIO_DIRECTION; start 218 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VC_MP_DATA; start 226 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VC_MP_DIR; start 231 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_INT_PACKET_CTRL; start 241 
drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_FLICKER_MODES; start 246 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0; start 272 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; start 279 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; start 289 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP4_USER_MODE; start 291 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP5_USER_MODE; start 297 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP4_FRAMERATE_REQUEST; start 299 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP5_FRAMERATE_REQUEST; start 308 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VC_WAKEUP; start 316 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VC_PW_CTRL; start 321 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_SYSTEMSTATE; start 330 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; start 338 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_SYSTEMCTRL; start 346 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_EXPOSURE_MODES; start 354 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_DEVICE_CONFIG; start 361 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SYSTEM_VP_SERIAL_ADDR; start 367 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_SENSOR_CR1; start 375 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VC_VC_CTRL; start 399 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_REHASH_VALUES; start 411 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP4_USER_EFFECTS; start 413 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP5_USER_EFFECTS; start 528 drivers/media/usb/cpia2/cpia2_core.c u8 start; start 535 drivers/media/usb/cpia2/cpia2_core.c start = 0; start 543 drivers/media/usb/cpia2/cpia2_core.c start = cmd->start; start 551 drivers/media/usb/cpia2/cpia2_core.c start = 0; start 559 drivers/media/usb/cpia2/cpia2_core.c start = cmd->start; start 573 drivers/media/usb/cpia2/cpia2_core.c start, count, cmd->direction); start 580 drivers/media/usb/cpia2/cpia2_core.c DIR(cmd), start + i, buffer[i]); start 742 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0; start 899 drivers/media/usb/cpia2/cpia2_core.c u8 start, u8 datum) start 902 drivers/media/usb/cpia2/cpia2_core.c cmd->start = start; start 930 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0x0C; /* Data */ start 1845 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_RAM_ADDR_H; start 1858 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP_RAM_DATA; start 1906 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0; start 1918 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0x80; start 1929 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0xA0; /* ST_CTRL */ start 1935 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0xA4; /* Stream status */ start 1940 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0xA8; /* USB status */ start 1950 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0xAF; /* USB settings */ start 1956 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0xC0; /* VC stuff */ start 2011 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0; start 2043 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0x0E; start 2065 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0x1B; start 2071 drivers/media/usb/cpia2/cpia2_core.c cmd.start = 0x0E; start 2085 drivers/media/usb/cpia2/cpia2_core.c cmd.start = CPIA2_VP5_EXPOSURE_TARGET; start 2091 drivers/media/usb/cpia2/cpia2_core.c cmd.start 
= 0x3A; start 2394 drivers/media/usb/cpia2/cpia2_core.c unsigned long start = (unsigned long) adr; start 2410 drivers/media/usb/cpia2/cpia2_core.c if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) start 2412 drivers/media/usb/cpia2/cpia2_core.c start += PAGE_SIZE; start 398 drivers/media/usb/cpia2/cpia2_usb.c cmd.start = CPIA2_VC_USB_ISOLIM; start 408 drivers/media/usb/cpia2/cpia2_usb.c cmd.start = CPIA2_VC_USB_STRM; start 430 drivers/media/usb/cpia2/cpia2_usb.c cmd.start = CPIA2_VC_USB_STRM; start 533 drivers/media/usb/cpia2/cpia2_usb.c u8 request, u8 * registers, u16 start, size_t size) start 549 drivers/media/usb/cpia2/cpia2_usb.c start, /* value */ start 565 drivers/media/usb/cpia2/cpia2_usb.c u8 request, u8 * registers, u16 start, size_t size) start 581 drivers/media/usb/cpia2/cpia2_usb.c start, /* value */ start 602 drivers/media/usb/cpia2/cpia2_usb.c u8 request, u8 start, u8 count, u8 direction) start 618 drivers/media/usb/cpia2/cpia2_usb.c err = read_packet(udev, request, (u8 *)registers, start, count); start 622 drivers/media/usb/cpia2/cpia2_usb.c err =write_packet(udev, request, (u8 *)registers, start, count); start 626 drivers/media/usb/cpia2/cpia2_usb.c request, start); start 2603 drivers/media/usb/cx231xx/cx231xx-avcore.c int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) start 2636 drivers/media/usb/cx231xx/cx231xx-avcore.c if (start) { start 1597 drivers/media/usb/cx231xx/cx231xx-video.c f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ? start 1601 drivers/media/usb/cx231xx/cx231xx-video.c f->fmt.vbi.start[1] = (dev->norm & V4L2_STD_625_50) ? start 1621 drivers/media/usb/cx231xx/cx231xx-video.c f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ? start 1625 drivers/media/usb/cx231xx/cx231xx-video.c f->fmt.vbi.start[1] = (dev->norm & V4L2_STD_625_50) ? start 871 drivers/media/usb/cx231xx/cx231xx.h int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type); start 599 drivers/media/usb/dvb-usb-v2/lmedm04.c u16 j, wlen, len_in, start, end; start 616 drivers/media/usb/dvb-usb-v2/lmedm04.c start = (i == 1) ? 0 : 512; start 618 drivers/media/usb/dvb-usb-v2/lmedm04.c for (j = start; j < end; j += (packet_size+1)) { start 631 drivers/media/usb/em28xx/em28xx-core.c int em28xx_capture_start(struct em28xx *dev, int start) start 657 drivers/media/usb/em28xx/em28xx-core.c start ? EM2874_TS1_CAPTURE_ENABLE : 0x00, start 662 drivers/media/usb/em28xx/em28xx-core.c start ? EM2874_TS2_CAPTURE_ENABLE : 0x00, start 668 drivers/media/usb/em28xx/em28xx-core.c start ? 0x10 : 0x00, 0x10); start 672 drivers/media/usb/em28xx/em28xx-core.c if (start) { start 706 drivers/media/usb/em28xx/em28xx-core.c (!start ^ led->inverted) ? 
start 2073 drivers/media/usb/em28xx/em28xx-video.c format->fmt.vbi.start[0] = 10; start 2074 drivers/media/usb/em28xx/em28xx-video.c format->fmt.vbi.start[1] = 273; start 2077 drivers/media/usb/em28xx/em28xx-video.c format->fmt.vbi.start[0] = 6; start 2078 drivers/media/usb/em28xx/em28xx-video.c format->fmt.vbi.start[1] = 318; start 823 drivers/media/usb/em28xx/em28xx.h int em28xx_capture_start(struct em28xx *dev, int start); start 242 drivers/media/usb/gspca/benq.c .start = sd_start, start 921 drivers/media/usb/gspca/conex.c .start = sd_start, start 1863 drivers/media/usb/gspca/cpia1.c .start = sd_start, start 245 drivers/media/usb/gspca/dtcs033.c .start = dtcs033_start, start 747 drivers/media/usb/gspca/etoms.c .start = sd_start, start 267 drivers/media/usb/gspca/finepix.c .start = sd_start, start 179 drivers/media/usb/gspca/gl860/gl860.c .start = sd_start, start 191 drivers/media/usb/gspca/gl860/gl860.c .start = sd_start, start 203 drivers/media/usb/gspca/gl860/gl860.c .start = sd_start, start 215 drivers/media/usb/gspca/gl860/gl860.c .start = sd_start, start 840 drivers/media/usb/gspca/gspca.c ret = gspca_dev->sd_desc->start(gspca_dev); start 108 drivers/media/usb/gspca/gspca.h cam_op start; /* called on stream on after URBs creation */ start 492 drivers/media/usb/gspca/jeilinj.c .start = sd_start, start 503 drivers/media/usb/gspca/jeilinj.c .start = sd_start, start 491 drivers/media/usb/gspca/jl2005bcd.c .start = sd_start, start 418 drivers/media/usb/gspca/kinect.c .start = sd_start_video, start 430 drivers/media/usb/gspca/kinect.c .start = sd_start_depth, start 444 drivers/media/usb/gspca/konica.c .start = sd_start, start 289 drivers/media/usb/gspca/m5602/m5602_core.c if (sd->sensor->start) start 290 drivers/media/usb/gspca/m5602/m5602_core.c sd->sensor->start(sd); start 370 drivers/media/usb/gspca/m5602/m5602_core.c .start = m5602_start_transfer, start 123 drivers/media/usb/gspca/m5602/m5602_mt9m111.h .start = mt9m111_start, start 102 drivers/media/usb/gspca/m5602/m5602_ov7660.h .start = ov7660_start, start 150 drivers/media/usb/gspca/m5602/m5602_ov9650.h .start = ov9650_start, start 163 drivers/media/usb/gspca/m5602/m5602_po1030.h .start = po1030_start, start 80 drivers/media/usb/gspca/m5602/m5602_s5k4aa.h .start = s5k4aa_start, start 54 drivers/media/usb/gspca/m5602/m5602_s5k83a.h .start = s5k83a_start, start 60 drivers/media/usb/gspca/m5602/m5602_sensor.h int (*start)(struct sd *sd); start 395 drivers/media/usb/gspca/mars.c .start = sd_start, start 1043 drivers/media/usb/gspca/mr97310a.c .start = sd_start, start 2048 drivers/media/usb/gspca/nw80x.c .start = sd_start, start 4959 drivers/media/usb/gspca/ov519.c .start = sd_start, start 1573 drivers/media/usb/gspca/ov534.c .start = sd_start, start 1787 drivers/media/usb/gspca/ov534_9.c .start = sd_start, start 427 drivers/media/usb/gspca/pac207.c .start = sd_start, start 897 drivers/media/usb/gspca/pac7302.c .start = sd_start, start 646 drivers/media/usb/gspca/pac7311.c .start = sd_start, start 678 drivers/media/usb/gspca/se401.c .start = sd_start, start 922 drivers/media/usb/gspca/sn9c2028.c .start = sd_start, start 2306 drivers/media/usb/gspca/sn9c20x.c .start = sd_start, start 1403 drivers/media/usb/gspca/sonixb.c .start = sd_start, start 2879 drivers/media/usb/gspca/sonixj.c .start = sd_start, start 402 drivers/media/usb/gspca/spca1528.c .start = sd_start, start 933 drivers/media/usb/gspca/spca500.c .start = sd_start, start 1997 drivers/media/usb/gspca/spca501.c .start = sd_start, start 756 drivers/media/usb/gspca/spca505.c 
.start = sd_start, start 565 drivers/media/usb/gspca/spca506.c .start = sd_start, start 1489 drivers/media/usb/gspca/spca508.c .start = sd_start, start 841 drivers/media/usb/gspca/spca561.c .start = sd_start_12a, start 853 drivers/media/usb/gspca/spca561.c .start = sd_start_72a, start 399 drivers/media/usb/gspca/sq905.c .start = sd_start, start 301 drivers/media/usb/gspca/sq905c.c .start = sd_start, start 1114 drivers/media/usb/gspca/sq930x.c .start = sd_start, start 402 drivers/media/usb/gspca/stk014.c .start = sd_start, start 641 drivers/media/usb/gspca/stk1135.c .start = sd_start, start 305 drivers/media/usb/gspca/stv0680.c .start = sd_start, start 294 drivers/media/usb/gspca/stv06xx/stv06xx.c err = sd->sensor->start(sd); start 538 drivers/media/usb/gspca/stv06xx/stv06xx.c .start = stv06xx_start, start 142 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h .start = hdcs_start, start 161 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h .start = hdcs_start, start 126 drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.h .start = pb0100_start, start 65 drivers/media/usb/gspca/stv06xx/stv06xx_sensor.h int (*start)(struct sd *sd); start 34 drivers/media/usb/gspca/stv06xx/stv06xx_st6422.h .start = st6422_start, start 190 drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.h .start = vv6410_start, start 975 drivers/media/usb/gspca/sunplus.c .start = sd_start, start 1006 drivers/media/usb/gspca/t613.c .start = sd_start, start 4931 drivers/media/usb/gspca/topro.c .start = sd_start, start 652 drivers/media/usb/gspca/touptek.c .start = sd_start, start 328 drivers/media/usb/gspca/tv8532.c .start = sd_start, start 3790 drivers/media/usb/gspca/vc032x.c .start = sd_start, start 321 drivers/media/usb/gspca/vicam.c .start = sd_start, start 3067 drivers/media/usb/gspca/xirlink_cit.c .start = sd_start, start 3082 drivers/media/usb/gspca/xirlink_cit.c .start = sd_start, start 6935 drivers/media/usb/gspca/zc3xx.c .start = sd_start, start 96 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c .start = { 0, 0 }, start 1005 drivers/media/usb/usbvision/usbvision-video.c start = vma->vm_start; start 1036 drivers/media/usb/usbvision/usbvision-video.c if (vm_insert_page(vma, start, vmalloc_to_page(pos))) { start 1040 drivers/media/usb/usbvision/usbvision-video.c start += PAGE_SIZE; start 449 drivers/media/usb/uvc/uvc_driver.c const unsigned char *start = buffer; start 712 drivers/media/usb/uvc/uvc_driver.c return buffer - start; start 345 drivers/media/v4l2-core/v4l2-ioctl.c vbi->start[0], vbi->start[1], start 672 drivers/media/v4l2-core/v4l2-ioctl.c p->start.speed, p->start.format); start 1150 drivers/media/v4l2-core/v4l2-mem2mem.c dc->start.speed = 0; start 1151 drivers/media/v4l2-core/v4l2-mem2mem.c dc->start.format = V4L2_DEC_START_FMT_NONE; start 915 drivers/memory/omap-gpmc.c res->start = base; start 931 drivers/memory/omap-gpmc.c res->start = 0; start 1001 drivers/memory/omap-gpmc.c r = adjust_resource(res, res->start & ~(size - 1), size); start 1011 drivers/memory/omap-gpmc.c r = gpmc_cs_set_memconf(cs, res->start, resource_size(res)); start 1019 drivers/memory/omap-gpmc.c *base = res->start; start 1475 drivers/memory/omap-gpmc.c gpmc_mem_root.start = GPMC_MEM_START; start 2099 drivers/memory/omap-gpmc.c ret = gpmc_cs_remap(cs, res.start); start 2102 drivers/memory/omap-gpmc.c cs, &res.start); start 2103 drivers/memory/omap-gpmc.c if (res.start < GPMC_MEM_START) { start 2362 drivers/memory/omap-gpmc.c phys_base = res->start; start 2375 drivers/memory/omap-gpmc.c gpmc->irq = res->start; start 61 drivers/memory/pl172.c u32 reg_offset, 
u32 max, int start) start 68 drivers/memory/pl172.c cycles = DIV_ROUND_UP(val * pl172->rate, NSEC_PER_MSEC) - start; start 79 drivers/memory/pl172.c dev_dbg(&adev->dev, "%s: %u cycle(s)\n", name, start + start 247 drivers/memory/pl172.c pl172->base = devm_ioremap(dev, adev->res.start, start 301 drivers/memory/ti-emif-pm.c emif_data->pm_data.ti_emif_base_addr_phys = res->start; start 460 drivers/memstick/core/memstick.c } else if (host->card->start) start 461 drivers/memstick/core/memstick.c host->card->start(host->card); start 2188 drivers/memstick/core/ms_block.c card->start = msb_start; start 1309 drivers/memstick/core/mspro_block.c card->start = mspro_block_start; start 127 drivers/mfd/88pm800.c .start = PM800_IRQ_RTC, start 145 drivers/mfd/88pm800.c .start = PM800_IRQ_ONKEY, start 60 drivers/mfd/88pm805.c .start = PM805_IRQ_MIC_DET, start 67 drivers/mfd/88pm805.c .start = PM805_IRQ_HP1_SHRT, start 74 drivers/mfd/88pm805.c .start = PM805_IRQ_HP2_SHRT, start 278 drivers/mfd/aat2870-core.c char *start = buf; start 289 drivers/mfd/aat2870-core.c while (*start == ' ') start 290 drivers/mfd/aat2870-core.c start++; start 292 drivers/mfd/aat2870-core.c ret = kstrtoul(start, 16, &addr); start 301 drivers/mfd/aat2870-core.c while (*start == ' ') start 302 drivers/mfd/aat2870-core.c start++; start 304 drivers/mfd/aat2870-core.c ret = kstrtoul(start, 16, &val); start 1115 drivers/mfd/ab8500-core.c ab8500->irq = resource->start; start 2680 drivers/mfd/ab8500-debugfs.c irq_ab8500 = res->start; start 143 drivers/mfd/altera-sysmgr.c sysmgr->base = (resource_size_t *)res->start; start 150 drivers/mfd/altera-sysmgr.c sysmgr->base = devm_ioremap(dev, res->start, start 155 drivers/mfd/altera-sysmgr.c sysmgr_config.max_register = res->end - res->start - 3; start 29 drivers/mfd/as3722.c .start = AS3722_IRQ_RTC_ALARM, start 38 drivers/mfd/as3722.c .start = AS3722_IRQ_ADC, start 645 drivers/mfd/asic3.c .start = ASIC3_OWM_BASE, start 650 drivers/mfd/asic3.c .start = ASIC3_IRQ_OWM, start 727 drivers/mfd/asic3.c .start = ASIC3_SD_CTRL_BASE, start 732 drivers/mfd/asic3.c .start = 0, start 895 drivers/mfd/asic3.c ds1wm_resources[0].start >>= asic->bus_shift; start 901 drivers/mfd/asic3.c asic->bus_shift) + mem_sdio->start, start 909 drivers/mfd/asic3.c asic3_mmc_resources[0].start >>= asic->bus_shift; start 975 drivers/mfd/asic3.c asic->mapping = ioremap(mem->start, resource_size(mem)); start 40 drivers/mfd/cs5535-mfd.c if (!request_region(res->start, resource_size(res), DRV_NAME)) { start 58 drivers/mfd/cs5535-mfd.c release_region(res->start, resource_size(res)); start 123 drivers/mfd/cs5535-mfd.c r->start = pci_resource_start(pdev, bar); start 259 drivers/mfd/da9055-core.c .start = DA9055_IRQ_NONKEY, start 267 drivers/mfd/da9055-core.c .start = DA9055_IRQ_ALARM, start 273 drivers/mfd/da9055-core.c .start = DA9055_IRQ_TICK, start 281 drivers/mfd/da9055-core.c .start = DA9055_IRQ_HWMON, start 288 drivers/mfd/da9055-core.c .start = DA9055_IRQ_REGULATOR, start 35 drivers/mfd/da9063-core.c .start = DA9063_IRQ_LDO_LIM, start 44 drivers/mfd/da9063-core.c .start = DA9063_IRQ_ALARM, start 50 drivers/mfd/da9063-core.c .start = DA9063_IRQ_TICK, start 59 drivers/mfd/da9063-core.c .start = DA9063_IRQ_ONKEY, start 67 drivers/mfd/da9063-core.c .start = DA9063_IRQ_ADC_RDY, start 51 drivers/mfd/davinci_voicecodec.c fifo_base = (dma_addr_t)res->start; start 73 drivers/mfd/davinci_voicecodec.c davinci_vc->davinci_vcif.dma_tx_channel = res->start; start 83 drivers/mfd/davinci_voicecodec.c davinci_vc->davinci_vcif.dma_rx_channel = 
res->start; start 2710 drivers/mfd/db8500-prcmu.c tcpm_base = ioremap(res->start, resource_size(res)); start 3087 drivers/mfd/db8500-prcmu.c prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 3100 drivers/mfd/db8500-prcmu.c tcdm_base = devm_ioremap(&pdev->dev, res->start, start 214 drivers/mfd/dm355evm_msp.c .start = irq, start 55 drivers/mfd/hi655x-pmic.c .start = PWRON_D20R_INT, start 60 drivers/mfd/hi655x-pmic.c .start = PWRON_D20F_INT, start 65 drivers/mfd/hi655x-pmic.c .start = PWRON_D4SR_INT, start 539 drivers/mfd/htc-i2cpld.c htcpld->chained_irq = res->start; start 104 drivers/mfd/htc-pasic3.c .start = 0, start 108 drivers/mfd/htc-pasic3.c .start = 0, start 137 drivers/mfd/htc-pasic3.c irq = r->start; start 144 drivers/mfd/htc-pasic3.c if (!request_mem_region(r->start, resource_size(r), "pasic3")) start 153 drivers/mfd/htc-pasic3.c asic->mapping = ioremap(r->start, resource_size(r)); start 193 drivers/mfd/htc-pasic3.c release_mem_region(r->start, resource_size(r)); start 246 drivers/mfd/intel-lpss.c resource_size_t addr = lpss->info->mem->start; start 387 drivers/mfd/intel-lpss.c lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET, start 165 drivers/mfd/intel_quark_i2c_gpio.c res[INTEL_QUARK_IORES_MEM].start = start 170 drivers/mfd/intel_quark_i2c_gpio.c res[INTEL_QUARK_IORES_IRQ].start = pdev->irq; start 196 drivers/mfd/intel_quark_i2c_gpio.c res[INTEL_QUARK_IORES_MEM].start = start 132 drivers/mfd/intel_soc_pmic_mrfld.c irq_level2_resources[i].start = ret; start 90 drivers/mfd/janz-cmodio.c res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno); start 91 drivers/mfd/janz-cmodio.c res->end = res->start + CMODIO_MODULBUS_SIZE - 1; start 97 drivers/mfd/janz-cmodio.c res->start = pci->resource[4].start; start 110 drivers/mfd/janz-cmodio.c res->start = 0; start 120 drivers/mfd/kempld-core.c .start = KEMPLD_IOINDEX, start 453 drivers/mfd/kempld-core.c pld->io_base = devm_ioport_map(dev, ioport->start, start 40 drivers/mfd/lp8788.c .start = LP8788_INT_CHG_INPUT_STATE, start 47 drivers/mfd/lp8788.c .start = LP8788_INT_ENTER_SYS_SUPPORT, start 54 drivers/mfd/lp8788.c .start = LP8788_INT_BATT_LOW, start 63 drivers/mfd/lp8788.c .start = LP8788_INT_RTC_ALARM1, start 918 drivers/mfd/lpc_ich.c !acpi_check_region(res->start + 0x40, 0x10, "LPC ICH GPIO3")) start 921 drivers/mfd/lpc_ich.c if (!acpi_check_region(res->start + 0x30, 0x10, "LPC ICH GPIO2")) start 924 drivers/mfd/lpc_ich.c ret = acpi_check_region(res->start + 0x00, 0x30, "LPC ICH GPIO1"); start 950 drivers/mfd/lpc_ich.c res->start = base_addr + ACPIBASE_GPE_OFF; start 977 drivers/mfd/lpc_ich.c res->start = base_addr; start 981 drivers/mfd/lpc_ich.c res->end = res->start + 128 - 1; start 984 drivers/mfd/lpc_ich.c res->end = res->start + 64 - 1; start 1030 drivers/mfd/lpc_ich.c res->start = base_addr + ACPIBASE_TCO_OFF; start 1034 drivers/mfd/lpc_ich.c res->start = base_addr + ACPIBASE_SMI_OFF; start 1063 drivers/mfd/lpc_ich.c res->start = base_addr + ACPIBASE_GCS_OFF; start 1071 drivers/mfd/lpc_ich.c res->start = base_addr + ACPIBASE_PMC_OFF; start 1103 drivers/mfd/lpc_ich.c res->start = spi_base & ~(SPIBASE_BYT_SZ - 1); start 1104 drivers/mfd/lpc_ich.c res->end = res->start + SPIBASE_BYT_SZ - 1; start 1112 drivers/mfd/lpc_ich.c res->start = spi_base + SPIBASE_LPT; start 1113 drivers/mfd/lpc_ich.c res->end = res->start + SPIBASE_LPT_SZ - 1; start 1134 drivers/mfd/lpc_ich.c res->start = spi_base & 0xfffffff0; start 1135 drivers/mfd/lpc_ich.c res->end = res->start + SPIBASE_APL_SZ 
- 1; start 1149 drivers/mfd/lpc_ich.c if (!res->start) start 108 drivers/mfd/lpc_sch.c res->start = base_addr; start 144 drivers/mfd/lpc_sch.c res->start = irq; start 39 drivers/mfd/max8925-core.c .start = MAX8925_TSC_IRQ, start 57 drivers/mfd/max8925-core.c .start = MAX8925_CHG_IRQ1, start 75 drivers/mfd/max8925-core.c .start = MAX8925_IRQ_RTC_ALARM0, start 93 drivers/mfd/max8925-core.c .start = MAX8925_IRQ_GPM_SW_R, start 98 drivers/mfd/max8925-core.c .start = MAX8925_IRQ_GPM_SW_F, start 169 drivers/mfd/mcp-sa11x0.c if (!request_mem_region(mem0->start, resource_size(mem0), start 175 drivers/mfd/mcp-sa11x0.c if (!request_mem_region(mem1->start, resource_size(mem1), start 195 drivers/mfd/mcp-sa11x0.c m->base0 = ioremap(mem0->start, resource_size(mem0)); start 196 drivers/mfd/mcp-sa11x0.c m->base1 = ioremap(mem1->start, resource_size(mem1)); start 229 drivers/mfd/mcp-sa11x0.c release_mem_region(mem1->start, resource_size(mem1)); start 231 drivers/mfd/mcp-sa11x0.c release_mem_region(mem0->start, resource_size(mem0)); start 253 drivers/mfd/mcp-sa11x0.c release_mem_region(mem1->start, resource_size(mem1)); start 254 drivers/mfd/mcp-sa11x0.c release_mem_region(mem0->start, resource_size(mem0)); start 210 drivers/mfd/mfd-core.c res[r].start = mem_base->start + start 211 drivers/mfd/mfd-core.c cell->resources[r].start; start 212 drivers/mfd/mfd-core.c res[r].end = mem_base->start + start 217 drivers/mfd/mfd-core.c WARN_ON(cell->resources[r].start != start 219 drivers/mfd/mfd-core.c res[r].start = res[r].end = irq_create_mapping( start 220 drivers/mfd/mfd-core.c domain, cell->resources[r].start); start 222 drivers/mfd/mfd-core.c res[r].start = irq_base + start 223 drivers/mfd/mfd-core.c cell->resources[r].start; start 229 drivers/mfd/mfd-core.c res[r].start = cell->resources[r].start; start 20 drivers/mfd/rdc321x-southbridge.c .start = RDC321X_WDT_CTRL, start 33 drivers/mfd/rdc321x-southbridge.c .start = RDC321X_GPIO_CTRL_REG1, start 38 drivers/mfd/rdc321x-southbridge.c .start = RDC321X_GPIO_CTRL_REG2, start 51 drivers/mfd/retu-mfd.c .start = RETU_INT_PWR, start 90 drivers/mfd/retu-mfd.c .start = TAHVO_INT_VBUS, start 113 drivers/mfd/rk808.c .start = RK808_IRQ_RTC_ALARM, start 125 drivers/mfd/rk808.c .start = RK805_IRQ_PWRON_FALL, start 130 drivers/mfd/rk808.c .start = RK805_IRQ_PWRON_RISE, start 769 drivers/mfd/sm501.c res->start = sm->io_res->start + offs; start 770 drivers/mfd/sm501.c res->end = res->start + size - 1; start 787 drivers/mfd/sm501.c res->start = sm->mem_res->start + *offs; start 788 drivers/mfd/sm501.c res->end = res->start + size - 1; start 801 drivers/mfd/sm501.c res->start = res->end = sm->irq; start 825 drivers/mfd/sm501.c uart_data->mapbase = sm->io_res->start + offset; start 1043 drivers/mfd/sm501.c resource_size_t iobase = sm->io_res->start + SM501_GPIO; start 1410 drivers/mfd/sm501.c sm->regs_claim = request_mem_region(sm->io_res->start, start 1420 drivers/mfd/sm501.c sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res)); start 1621 drivers/mfd/sm501.c sm->regs_claim = request_mem_region(sm->io_res->start, start 29 drivers/mfd/sta2x11-mfd.c unsigned int start, start 32 drivers/mfd/sta2x11-mfd.c return ((r >= start) && (r <= end)); start 316 drivers/mfd/sta2x11-mfd.c if (!request_mem_region(res->start, resource_size(res), name)) start 319 drivers/mfd/sta2x11-mfd.c mfd->regs[index] = ioremap(res->start, resource_size(res)); start 321 drivers/mfd/sta2x11-mfd.c release_mem_region(res->start, resource_size(res)); start 421 drivers/mfd/sta2x11-mfd.c .start = _cell * 
4096, .end = _cell * 4096 + 4095, \ start 429 drivers/mfd/sta2x11-mfd.c .start = 0, start 220 drivers/mfd/stm32-timers.c ddata->dma.phys_base = res->start; start 1322 drivers/mfd/stmpe.c res->start = res->end = block->irq + j; start 24 drivers/mfd/sun6i-prcm.c .start = 0x0, start 32 drivers/mfd/sun6i-prcm.c .start = 0xc, start 40 drivers/mfd/sun6i-prcm.c .start = 0x28, start 48 drivers/mfd/sun6i-prcm.c .start = 0x54, start 56 drivers/mfd/sun6i-prcm.c .start = 0xb0, start 63 drivers/mfd/syscon.c base = ioremap(res.start, resource_size(&res)); start 245 drivers/mfd/syscon.c base = devm_ioremap(dev, res->start, resource_size(res)); start 249 drivers/mfd/syscon.c syscon_config.max_register = res->end - res->start - 3; start 41 drivers/mfd/t7l66xb.c .start = 0x800, start 46 drivers/mfd/t7l66xb.c .start = IRQ_T7L66XB_MMC, start 100 drivers/mfd/t7l66xb.c t7l66xb_mmc_resources[0].start & 0xfffe); start 148 drivers/mfd/t7l66xb.c .start = 0xc00, start 153 drivers/mfd/t7l66xb.c .start = 0x0100, start 158 drivers/mfd/t7l66xb.c .start = IRQ_T7L66XB_NAND, start 295 drivers/mfd/t7l66xb.c t7l66xb_mmc_resources[0].start & 0xfffe); start 350 drivers/mfd/t7l66xb.c rscr->start = iomem->start; start 351 drivers/mfd/t7l66xb.c rscr->end = iomem->start + 0xff; start 358 drivers/mfd/t7l66xb.c t7l66xb->scr = ioremap(rscr->start, resource_size(rscr)); start 376 drivers/mfd/t7l66xb.c (unsigned long)iomem->start, t7l66xb->irq); start 146 drivers/mfd/tc3589x.c .start = TC3589x_INT_GPIIRQ, start 154 drivers/mfd/tc3589x.c .start = TC3589x_INT_KBDIRQ, start 30 drivers/mfd/tc6387xb.c .start = 0x800, start 35 drivers/mfd/tc6387xb.c .start = 0, start 66 drivers/mfd/tc6387xb.c tc6387xb_mmc_resources[0].start & 0xfffe); start 99 drivers/mfd/tc6387xb.c tc6387xb_mmc_resources[0].start & 0xfffe); start 163 drivers/mfd/tc6387xb.c rscr->start = iomem->start; start 164 drivers/mfd/tc6387xb.c rscr->end = iomem->start + 0xff; start 171 drivers/mfd/tc6387xb.c tc6387xb->scr = ioremap(rscr->start, resource_size(rscr)); start 138 drivers/mfd/tc6393xb.c .start = 0x1000, start 143 drivers/mfd/tc6393xb.c .start = 0x0100, start 148 drivers/mfd/tc6393xb.c .start = IRQ_TC6393_NAND, start 156 drivers/mfd/tc6393xb.c .start = 0x800, start 161 drivers/mfd/tc6393xb.c .start = IRQ_TC6393_MMC, start 169 drivers/mfd/tc6393xb.c .start = 0x3000, start 174 drivers/mfd/tc6393xb.c .start = 0x0300, start 179 drivers/mfd/tc6393xb.c .start = 0x010000, start 184 drivers/mfd/tc6393xb.c .start = 0x018000, start 189 drivers/mfd/tc6393xb.c .start = IRQ_TC6393_OHCI, start 197 drivers/mfd/tc6393xb.c .start = 0x5000, start 202 drivers/mfd/tc6393xb.c .start = 0x0500, start 207 drivers/mfd/tc6393xb.c .start = 0x100000, start 212 drivers/mfd/tc6393xb.c .start = IRQ_TC6393_FB, start 351 drivers/mfd/tc6393xb.c tc6393xb_mmc_resources[0].start & 0xfffe); start 361 drivers/mfd/tc6393xb.c tc6393xb_mmc_resources[0].start & 0xfffe); start 642 drivers/mfd/tc6393xb.c rscr->start = iomem->start; start 643 drivers/mfd/tc6393xb.c rscr->end = iomem->start + 0xff; start 650 drivers/mfd/tc6393xb.c tc6393xb->scr = ioremap(rscr->start, resource_size(rscr)); start 677 drivers/mfd/tc6393xb.c (unsigned long) iomem->start, tc6393xb->irq); start 185 drivers/mfd/ti_am335x_tscadc.c tscadc->tscadc_phys_base = res->start; start 82 drivers/mfd/timberdale.c .start = XIICOFFSET, start 87 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_I2C, start 95 drivers/mfd/timberdale.c .start = OCORESOFFSET, start 100 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_I2C, start 143 drivers/mfd/timberdale.c .start 
= SPIOFFSET, start 148 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_SPI, start 162 drivers/mfd/timberdale.c .start = ETHOFFSET, start 167 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_ETHSW_IF, start 182 drivers/mfd/timberdale.c .start = GPIOOFFSET, start 187 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_GPIO, start 195 drivers/mfd/timberdale.c .start = MLCOREOFFSET, start 200 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_MLCORE, start 205 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_MLCORE_BUF, start 213 drivers/mfd/timberdale.c .start = UARTOFFSET, start 218 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_UART, start 226 drivers/mfd/timberdale.c .start = UARTLITEOFFSET, start 231 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_UARTLITE, start 255 drivers/mfd/timberdale.c .start = RDSOFFSET, start 260 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_RDS, start 283 drivers/mfd/timberdale.c .start = LOGIWOFFSET, start 354 drivers/mfd/timberdale.c .start = DMAOFFSET, start 359 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_DMA, start 599 drivers/mfd/timberdale.c .start = SDHC0OFFSET, start 604 drivers/mfd/timberdale.c .start = IRQ_TIMBERDALE_SDHC, start 43 drivers/mfd/tps65090.c .start = TPS65090_IRQ_VAC_STATUS_CHANGE, start 97 drivers/mfd/tps6586x.c .start = TPS6586X_INT_RTC_ALM1, start 26 drivers/mfd/tps65910.c .start = TPS65910_IRQ_RTC_ALARM, start 39 drivers/mfd/tps80031.c .start = TPS80031_INT_RTC_ALARM, start 212 drivers/mfd/tqmx86.c tqmx_gpio_resources[0].start = gpio_irq; start 626 drivers/mfd/twl-core.c { .start = irq0, .flags = IORESOURCE_IRQ, }, start 627 drivers/mfd/twl-core.c { .start = irq1, .flags = IORESOURCE_IRQ, }, start 764 drivers/mfd/twl6040.c twl6040_codec_rsrc[0].start = irq; start 776 drivers/mfd/twl6040.c twl6040_vibra_rsrc[0].start = irq; start 168 drivers/mfd/vexpress-sysreg.c base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); start 84 drivers/mfd/vx855.c vx855_gpio_resources[0].start = gpio_io_offset + VX855_PMIO_R_GPI; start 85 drivers/mfd/vx855.c vx855_gpio_resources[0].end = vx855_gpio_resources[0].start + 3; start 86 drivers/mfd/vx855.c vx855_gpio_resources[1].start = gpio_io_offset + VX855_PMIO_R_GPO; start 87 drivers/mfd/vx855.c vx855_gpio_resources[1].end = vx855_gpio_resources[1].start + 3; start 617 drivers/mfd/wm831x-core.c .start = WM831X_DC1_CONTROL_1, start 623 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_DC1, start 629 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_HC_DC1, start 638 drivers/mfd/wm831x-core.c .start = WM831X_DC2_CONTROL_1, start 644 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_DC2, start 650 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_HC_DC2, start 658 drivers/mfd/wm831x-core.c .start = WM831X_DC3_CONTROL_1, start 664 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_DC3, start 672 drivers/mfd/wm831x-core.c .start = WM831X_DC4_CONTROL, start 678 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_DC4, start 686 drivers/mfd/wm831x-core.c .start = WM831X_DC4_CONTROL, start 692 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_DC4, start 700 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_GPIO_1, start 708 drivers/mfd/wm831x-core.c .start = WM831X_CURRENT_SINK_1, start 713 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CS1, start 721 drivers/mfd/wm831x-core.c .start = WM831X_CURRENT_SINK_2, start 726 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CS2, start 734 drivers/mfd/wm831x-core.c .start = WM831X_LDO1_CONTROL, start 740 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO1, start 748 
drivers/mfd/wm831x-core.c .start = WM831X_LDO2_CONTROL, start 754 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO2, start 762 drivers/mfd/wm831x-core.c .start = WM831X_LDO3_CONTROL, start 768 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO3, start 776 drivers/mfd/wm831x-core.c .start = WM831X_LDO4_CONTROL, start 782 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO4, start 790 drivers/mfd/wm831x-core.c .start = WM831X_LDO5_CONTROL, start 796 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO5, start 804 drivers/mfd/wm831x-core.c .start = WM831X_LDO6_CONTROL, start 810 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO6, start 818 drivers/mfd/wm831x-core.c .start = WM831X_LDO7_CONTROL, start 824 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO7, start 832 drivers/mfd/wm831x-core.c .start = WM831X_LDO8_CONTROL, start 838 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO8, start 846 drivers/mfd/wm831x-core.c .start = WM831X_LDO9_CONTROL, start 852 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO9, start 860 drivers/mfd/wm831x-core.c .start = WM831X_LDO10_CONTROL, start 866 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_UV_LDO10, start 874 drivers/mfd/wm831x-core.c .start = WM831X_LDO11_ON_CONTROL, start 882 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_ON, start 892 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_PPM_SYSLO, start 898 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_PPM_PWR_SRC, start 904 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_PPM_USB_CURR, start 910 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_BATT_HOT, start 916 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_BATT_COLD, start 922 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_BATT_FAIL, start 928 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_OV, start 934 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_END, start 940 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_TO, start 946 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_MODE, start 952 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_CHG_START, start 961 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_RTC_PER, start 967 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_RTC_ALM, start 975 drivers/mfd/wm831x-core.c .start = WM831X_STATUS_LED_1, start 983 drivers/mfd/wm831x-core.c .start = WM831X_STATUS_LED_2, start 992 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_TCHPD, start 998 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_TCHDATA, start 1006 drivers/mfd/wm831x-core.c .start = WM831X_IRQ_WDOG_TO, start 45 drivers/mfd/wm8994-core.c .start = WM8994_IRQ_TEMP_SHUT, start 53 drivers/mfd/wm8994-core.c .start = WM8994_IRQ_GPIO(1), start 220 drivers/misc/atmel-ssc.c ssc->phybase = regs->start; start 340 drivers/misc/cs5535-mfgpt.c if (!request_region(res->start, resource_size(res), pdev->name)) { start 346 drivers/misc/cs5535-mfgpt.c cs5535_mfgpt_chip.base = res->start; start 184 drivers/misc/cxl/context.c u64 start = vma->vm_pgoff << PAGE_SHIFT; start 188 drivers/misc/cxl/context.c if (start + len > ctx->afu->adapter->ps_size) start 206 drivers/misc/cxl/context.c if (start + len > ctx->psn_size) start 135 drivers/misc/cxl/native.c u64 start, end; start 162 drivers/misc/cxl/native.c start = local_clock(); start 195 drivers/misc/cxl/native.c pr_devel("PSL purged in %lld ns\n", end - start); start 44 drivers/misc/enclosure.c struct enclosure_device *start) start 49 drivers/misc/enclosure.c edev = list_prepare_entry(start, &container_list, node); start 50 drivers/misc/enclosure.c if (start) start 51 drivers/misc/enclosure.c 
put_device(&start->edev); start 108 drivers/misc/fastrpc.c u64 start; start 366 drivers/misc/fastrpc.c int st = CMP(pa->start, pb->start); start 379 drivers/misc/fastrpc.c ctx->olaps[i].start = ctx->args[i].ptr; start 380 drivers/misc/fastrpc.c ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length; start 388 drivers/misc/fastrpc.c if (ctx->olaps[i].start < max_end) { start 391 drivers/misc/fastrpc.c ctx->olaps[i].offset = max_end - ctx->olaps[i].start; start 402 drivers/misc/fastrpc.c ctx->olaps[i].mstart = ctx->olaps[i].start; start 862 drivers/misc/habanalabs/habanalabs.h u64 start; start 387 drivers/misc/habanalabs/memory.c va_block->start, va_block->end, va_block->size); start 410 drivers/misc/habanalabs/memory.c if (&prev->node != va_list && prev->end + 1 == va_block->start) { start 412 drivers/misc/habanalabs/memory.c prev->size = prev->end - prev->start; start 419 drivers/misc/habanalabs/memory.c if (&next->node != va_list && va_block->end + 1 == next->start) { start 420 drivers/misc/habanalabs/memory.c next->start = va_block->start; start 421 drivers/misc/habanalabs/memory.c next->size = next->end - next->start; start 442 drivers/misc/habanalabs/memory.c struct list_head *va_list, u64 start, u64 end) start 445 drivers/misc/habanalabs/memory.c u64 size = end - start; start 451 drivers/misc/habanalabs/memory.c if (hl_mem_area_crosses_range(start, size, va_block->start, start 455 drivers/misc/habanalabs/memory.c va_block->start, va_block->end); start 459 drivers/misc/habanalabs/memory.c if (va_block->end < start) start 467 drivers/misc/habanalabs/memory.c va_block->start = start; start 495 drivers/misc/habanalabs/memory.c struct hl_va_range *va_range, u64 start, u64 end) start 500 drivers/misc/habanalabs/memory.c rc = add_va_block_locked(hdev, &va_range->list, start, end); start 550 drivers/misc/habanalabs/memory.c valid_start = va_block->start; start 585 drivers/misc/habanalabs/memory.c if (res_valid_start > new_va_block->start) { start 586 drivers/misc/habanalabs/memory.c prev_start = new_va_block->start; start 589 drivers/misc/habanalabs/memory.c new_va_block->start = res_valid_start; start 596 drivers/misc/habanalabs/memory.c new_va_block->start += size; start 597 drivers/misc/habanalabs/memory.c new_va_block->size = new_va_block->end - new_va_block->start; start 1231 drivers/misc/habanalabs/memory.c u64 start, end; start 1257 drivers/misc/habanalabs/memory.c start = addr & PAGE_MASK; start 1260 drivers/misc/habanalabs/memory.c npages = (end - start) >> PAGE_SHIFT; start 1273 drivers/misc/habanalabs/memory.c rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, start 1418 drivers/misc/habanalabs/memory.c struct hl_va_range *va_range, u64 start, u64 end) start 1426 drivers/misc/habanalabs/memory.c if (start & (PAGE_SIZE - 1)) { start 1427 drivers/misc/habanalabs/memory.c start &= PAGE_MASK; start 1428 drivers/misc/habanalabs/memory.c start += PAGE_SIZE; start 1434 drivers/misc/habanalabs/memory.c if (start >= end) { start 1439 drivers/misc/habanalabs/memory.c rc = add_va_block(hdev, va_range, start, end); start 1446 drivers/misc/habanalabs/memory.c va_range->start_addr = start; start 1578 drivers/misc/habanalabs/memory.c if (va_block->start != va_range->start_addr || start 1582 drivers/misc/habanalabs/memory.c va_block->start, va_block->end); start 770 drivers/misc/hpilo.c int devnum, minor, start, error = 0; start 832 drivers/misc/hpilo.c start = devnum * max_ccb; start 833 drivers/misc/hpilo.c error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb); 
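
The habanalabs memory.c entries above (around lines 410-421) index the classic free-list coalescing test: a returned block merges with its predecessor when prev->end + 1 == block->start and with its successor when block->end + 1 == next->start. The driver does this over a linked list; a compact array-based sketch of the same rule, with assumed names (struct range, add_range), is:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };

    /*
     * Insert [start, end] into a sorted table of disjoint ranges and merge it
     * with any neighbour it touches or overlaps.  The caller guarantees room
     * for one extra entry.  Returns the new entry count.
     */
    static int add_range(struct range *tab, int n, uint64_t start, uint64_t end)
    {
        int i = 0, j;

        while (i < n && tab[i].end + 1 < start)    /* skip ranges entirely below */
            i++;
        j = i;
        while (j < n && tab[j].start <= end + 1) { /* absorb touching/overlapping ones */
            if (tab[j].start < start)
                start = tab[j].start;
            if (tab[j].end > end)
                end = tab[j].end;
            j++;
        }
        memmove(&tab[i + 1], &tab[j], (size_t)(n - j) * sizeof(*tab));
        n -= (j - i) - 1;
        tab[i].start = start;
        tab[i].end = end;
        return n;
    }

    int main(void)
    {
        struct range tab[4] = { { 0x1000, 0x1fff }, { 0x4000, 0x4fff } };
        int n = add_range(tab, 2, 0x2000, 0x2fff);  /* merges into [0x1000, 0x2fff] */
        printf("%d ranges, first ends at 0x%llx\n", n,
               (unsigned long long)tab[0].end);
        return 0;
    }
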
start 127 drivers/misc/lkdtm/refcount.c static void check_negative(refcount_t *ref, int start) start 134 drivers/misc/lkdtm/refcount.c if (refcount_read(ref) == start) { start 136 drivers/misc/lkdtm/refcount.c start); start 467 drivers/misc/mei/hbm.c struct mei_cl *cl, u8 start) start 478 drivers/misc/mei/hbm.c req.start = start; start 500 drivers/misc/mei/hbm.c return mei_cl_notify_req2fop(rs->start); start 222 drivers/misc/mei/hw-txe.c ktime_t stop, start; start 224 drivers/misc/mei/hw-txe.c start = ktime_get(); start 225 drivers/misc/mei/hw-txe.c stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT)); start 231 drivers/misc/mei/hw-txe.c ktime_to_us(ktime_sub(ktime_get(), start))); start 420 drivers/misc/mei/hw.h u8 start; start 445 drivers/misc/mei/hw.h u8 start; start 641 drivers/misc/mei/main.c static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync) start 102 drivers/misc/mic/bus/cosm_bus.h int (*start)(struct cosm_device *cdev, int id); start 105 drivers/misc/mic/cosm/cosm_main.c rc = cdev->hw_ops->start(cdev, cdev->index); start 582 drivers/misc/mic/host/mic_boot.c .start = _mic_start, start 141 drivers/misc/mic/host/mic_smpt.c u64 start = dma_addr; start 145 drivers/misc/mic/host/mic_smpt.c while (start < end) { start 146 drivers/misc/mic/host/mic_smpt.c ref[i++] = min(mic_smpt_align_high(mdev, start + 1), start 147 drivers/misc/mic/host/mic_smpt.c end) - start; start 148 drivers/misc/mic/host/mic_smpt.c start = mic_smpt_align_high(mdev, start + 1); start 109 drivers/misc/mic/scif/scif_dma.c u64 start, u64 len) start 114 drivers/misc/mic/scif/scif_dma.c u64 end = start + len; start 116 drivers/misc/mic/scif/scif_dma.c if (end <= start) start 125 drivers/misc/mic/scif/scif_dma.c if (start < start_va && end <= start_va) start 127 drivers/misc/mic/scif/scif_dma.c if (start >= end_va) start 133 drivers/misc/mic/scif/scif_dma.c static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len) start 138 drivers/misc/mic/scif/scif_dma.c __scif_rma_destroy_tcw(mmn, start, len); start 200 drivers/misc/mic/scif/scif_dma.c scif_rma_destroy_tcw(mmn, range->start, range->end - range->start); start 751 drivers/misc/mic/scif/scif_dma.c s64 start, end; start 764 drivers/misc/mic/scif/scif_dma.c start = iter->offset; start 767 drivers/misc/mic/scif/scif_dma.c start = window->offset; start 770 drivers/misc/mic/scif/scif_dma.c end = start + (window->num_pages[i] << PAGE_SHIFT); start 771 drivers/misc/mic/scif/scif_dma.c if (off >= start && off < end) { start 774 drivers/misc/mic/scif/scif_dma.c iter->offset = start; start 778 drivers/misc/mic/scif/scif_dma.c return (window->dma_addr[i] + (off - start)); start 780 drivers/misc/mic/scif/scif_dma.c start += (window->num_pages[i] << PAGE_SHIFT); start 35 drivers/misc/mic/scif/scif_ports.c static int __scif_get_port(int start, int end) start 43 drivers/misc/mic/scif/scif_ports.c id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC); start 1042 drivers/misc/mic/vop/vop_vringh.c unsigned long start = MIC_DP_SIZE; start 1062 drivers/misc/mic/vop/vop_vringh.c if (offset == start) { start 1067 drivers/misc/mic/vop/vop_vringh.c start += vvr->vring.len; start 106 drivers/misc/ocxl/ocxl_internal.h void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size); start 108 drivers/misc/ocxl/ocxl_internal.h void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size); start 8 drivers/misc/ocxl/pasid.c u32 start; start 19 drivers/misc/ocxl/pasid.c pr_debug("Range %d->%d\n", cur->start, cur->end); start 38 
drivers/misc/ocxl/pasid.c if ((cur->start - last_end) > size) start 44 drivers/misc/ocxl/pasid.c new->start = last_end + 1; start 45 drivers/misc/ocxl/pasid.c new->end = new->start + size - 1; start 52 drivers/misc/ocxl/pasid.c rc = new->start; start 61 drivers/misc/ocxl/pasid.c static void range_free(struct list_head *head, u32 start, u32 size, start 68 drivers/misc/ocxl/pasid.c if (cur->start == start && cur->end == (start + size - 1)) { start 91 drivers/misc/ocxl/pasid.c void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size) start 93 drivers/misc/ocxl/pasid.c return range_free(&fn->pasid_list, start, size, "afu pasid"); start 104 drivers/misc/ocxl/pasid.c void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size) start 106 drivers/misc/ocxl/pasid.c return range_free(&fn->actag_list, start, size, "afu actag"); start 74 drivers/misc/pvpanic.c base = ioport_map(r.start, resource_size(&r)); start 80 drivers/misc/pvpanic.c base = ioremap(r.start, resource_size(&r)); start 120 drivers/misc/sgi-gru/grukservices.c void *start; start 552 drivers/misc/sgi-gru/grukservices.c mq->start = &mq->data; start 851 drivers/misc/sgi-gru/grukservices.c next = mq->start; start 232 drivers/misc/sgi-gru/gruprocfs.c .start = seq_start, start 239 drivers/misc/sgi-gru/gruprocfs.c .start = seq_start, start 659 drivers/misc/sgi-gru/grutables.h extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, start 145 drivers/misc/sgi-gru/grutlbpurge.c void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, start 162 drivers/misc/sgi-gru/grutlbpurge.c start, len, gms->ms_asidmap[0]); start 172 drivers/misc/sgi-gru/grutlbpurge.c asid = GRUASID(asid, start); start 175 drivers/misc/sgi-gru/grutlbpurge.c gid, asid, start, grupagesize, num, asids->mt_ctxbitmap); start 177 drivers/misc/sgi-gru/grutlbpurge.c tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, start 218 drivers/misc/sgi-gru/grutlbpurge.c range->start, range->end, atomic_read(&gms->ms_range_active)); start 219 drivers/misc/sgi-gru/grutlbpurge.c gru_flush_tlb_range(gms, range->start, range->end - range->start); start 235 drivers/misc/sgi-gru/grutlbpurge.c gms, range->start, range->end); start 56 drivers/misc/sram.c phys_addr_t start, struct sram_partition *part) start 65 drivers/misc/sram.c ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start, start 76 drivers/misc/sram.c phys_addr_t start, struct sram_partition *part) start 81 drivers/misc/sram.c (unsigned long long)start); start 94 drivers/misc/sram.c phys_addr_t start) start 100 drivers/misc/sram.c part->base = sram->virt_base + block->start; start 103 drivers/misc/sram.c ret = sram_add_pool(sram, block, start, part); start 108 drivers/misc/sram.c ret = sram_add_export(sram, block, start, part); start 117 drivers/misc/sram.c ret = sram_add_pool(sram, block, start, part); start 153 drivers/misc/sram.c return ra->start - rb->start; start 191 drivers/misc/sram.c if (child_res.start < res->start || child_res.end > res->end) { start 199 drivers/misc/sram.c block->start = child_res.start - res->start; start 236 drivers/misc/sram.c block->start, block->start + block->size); start 239 drivers/misc/sram.c block->start, block->start + block->size); start 247 drivers/misc/sram.c rblocks[nblocks - 1].start = size; start 266 drivers/misc/sram.c if (block->start < cur_start) { start 269 drivers/misc/sram.c block->start, cur_start); start 278 drivers/misc/sram.c res->start + block->start); start 286 drivers/misc/sram.c if (block->start == cur_start) { 
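
The ocxl pasid.c entries above show a first-fit scan over a sorted list of allocated {start, end} ranges: the walk keeps the last end seen, claims the first gap where (cur->start - last_end) > size, and hands back new->start = last_end + 1. An array-based sketch of the same scan, under assumed names (struct used_range, find_first_gap):

    #include <stdint.h>
    #include <stdio.h>

    struct used_range { uint32_t start, end; };

    /*
     * Return the lowest id >= base where 'size' consecutive ids fit between
     * the sorted, disjoint allocated ranges, or -1 if nothing fits below
     * 'limit' (inclusive).  First-fit only, like the pasid range walk.
     */
    static int64_t find_first_gap(const struct used_range *tab, int n,
                                  uint32_t base, uint32_t limit, uint32_t size)
    {
        uint32_t next = base;                     /* lowest id still free to consider */

        for (int i = 0; i < n; i++) {
            if (tab[i].end < next)
                continue;                         /* range entirely below 'next' */
            if (tab[i].start > next && tab[i].start - next >= size)
                return next;                      /* gap before this range */
            next = tab[i].end + 1;                /* jump past the allocated range */
        }
        if (next <= limit && limit - next + 1 >= size)
            return next;                          /* room after the last range */
        return -1;
    }

    int main(void)
    {
        struct used_range used[] = { { 0, 15 }, { 32, 47 } };
        /* 16 free ids sit between the two ranges: expect 16 back. */
        printf("%lld\n", (long long)find_first_gap(used, 2, 0, 511, 8));
        return 0;
    }
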
start 287 drivers/misc/sram.c cur_start = block->start + block->size; start 296 drivers/misc/sram.c cur_size = block->start - cur_start; start 303 drivers/misc/sram.c res->start + cur_start, cur_size, -1); start 310 drivers/misc/sram.c cur_start = block->start + block->size; start 362 drivers/misc/sram.c if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) { start 368 drivers/misc/sram.c sram->virt_base = devm_ioremap(sram->dev, res->start, size); start 370 drivers/misc/sram.c sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size); start 30 drivers/misc/sram.h u32 start; start 51 drivers/mmc/core/mmc.c #define UNSTUFF_BITS(resp,start,size) \ start 55 drivers/mmc/core/mmc.c const int __off = 3 - ((start) / 32); \ start 56 drivers/mmc/core/mmc.c const int __shft = (start) & 31; \ start 55 drivers/mmc/core/sd.c #define UNSTUFF_BITS(resp,start,size) \ start 59 drivers/mmc/core/sd.c const int __off = 3 - ((start) / 32); \ start 60 drivers/mmc/core/sd.c const int __shft = (start) & 31; \ start 462 drivers/mmc/host/android-goldfish.c pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end); start 463 drivers/mmc/host/android-goldfish.c host->reg_base = ioremap(res->start, resource_size(res)); start 2481 drivers/mmc/host/atmel-mci.c host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); start 2492 drivers/mmc/host/atmel-mci.c host->mapbase = regs->start; start 884 drivers/mmc/host/au1xmmc.c txid = res->start; start 889 drivers/mmc/host/au1xmmc.c rxid = res->start; start 974 drivers/mmc/host/au1xmmc.c host->ioarea = request_mem_region(r->start, resource_size(r), start 981 drivers/mmc/host/au1xmmc.c host->iobase = ioremap(r->start, 0x3c); start 992 drivers/mmc/host/au1xmmc.c host->irq = r->start; start 1019 drivers/mmc/host/au1xmmc.c if (host->ioarea->start == AU1100_SD0_PHYS_ADDR) start 58 drivers/mmc/host/cavium-octeon.c static void l2c_lock_mem_region(u64 start, u64 len) start 63 drivers/mmc/host/cavium-octeon.c end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE); start 64 drivers/mmc/host/cavium-octeon.c start = ALIGN(start, CVMX_CACHE_LINE_SIZE); start 66 drivers/mmc/host/cavium-octeon.c while (start <= end) { start 67 drivers/mmc/host/cavium-octeon.c l2c_lock_line(start); start 68 drivers/mmc/host/cavium-octeon.c start += CVMX_CACHE_LINE_SIZE; start 74 drivers/mmc/host/cavium-octeon.c static void l2c_unlock_mem_region(u64 start, u64 len) start 79 drivers/mmc/host/cavium-octeon.c end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE); start 80 drivers/mmc/host/cavium-octeon.c start = ALIGN(start, CVMX_CACHE_LINE_SIZE); start 82 drivers/mmc/host/cavium-octeon.c while (start <= end) { start 83 drivers/mmc/host/cavium-octeon.c l2c_unlock_line(start); start 84 drivers/mmc/host/cavium-octeon.c start += CVMX_CACHE_LINE_SIZE; start 1083 drivers/mmc/host/cqhci.c cqhci_memres->start, start 410 drivers/mmc/host/davinci_mmc.c .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, start 432 drivers/mmc/host/davinci_mmc.c .src_addr = host->mem_res->start + DAVINCI_MMCDRR, start 1208 drivers/mmc/host/davinci_mmc.c mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, start 1221 drivers/mmc/host/davinci_mmc.c host->base = devm_ioremap(&pdev->dev, mem->start, mem_size); start 50 drivers/mmc/host/dw_mmc-pltfm.c host->phy_regs = regs->start; start 144 drivers/mmc/host/dw_mmc-rockchip.c int start; start 175 drivers/mmc/host/dw_mmc-rockchip.c ranges[range_count-1].start = i; start 207 drivers/mmc/host/dw_mmc-rockchip.c ranges[0].start = 
ranges[range_count-1].start; start 211 drivers/mmc/host/dw_mmc-rockchip.c if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) { start 220 drivers/mmc/host/dw_mmc-rockchip.c int len = (ranges[i].end - ranges[i].start + 1); start 231 drivers/mmc/host/dw_mmc-rockchip.c TUNING_ITERATION_TO_PHASE(ranges[i].start, start 240 drivers/mmc/host/dw_mmc-rockchip.c TUNING_ITERATION_TO_PHASE(ranges[longest_range].start, start 247 drivers/mmc/host/dw_mmc-rockchip.c middle_phase = ranges[longest_range].start + longest_range_len / 2; start 92 drivers/mmc/host/dw_mmc-zx.c int ret, len = 0, start = 0, end = 0, delay, best = 0; start 97 drivers/mmc/host/dw_mmc-zx.c if (start >= 0) { start 100 drivers/mmc/host/dw_mmc-zx.c if ((end - start) > len) { start 101 drivers/mmc/host/dw_mmc-zx.c best = (start + end) >> 1; start 102 drivers/mmc/host/dw_mmc-zx.c len = end - start; start 105 drivers/mmc/host/dw_mmc-zx.c start = -1; start 109 drivers/mmc/host/dw_mmc-zx.c if (start < 0) start 110 drivers/mmc/host/dw_mmc-zx.c start = delay; start 113 drivers/mmc/host/dw_mmc-zx.c if (start >= 0) { start 115 drivers/mmc/host/dw_mmc-zx.c if ((end - start) > len) { start 116 drivers/mmc/host/dw_mmc-zx.c best = (start + end) >> 1; start 117 drivers/mmc/host/dw_mmc-zx.c len = end - start; start 124 drivers/mmc/host/dw_mmc-zx.c start, end); start 761 drivers/mmc/host/dw_mmc.c .start = dw_mci_idmac_start_dma, start 862 drivers/mmc/host/dw_mmc.c .start = dw_mci_edmac_start_dma, start 1121 drivers/mmc/host/dw_mmc.c if (host->dma_ops->start(host, sg_len)) { start 2950 drivers/mmc/host/dw_mmc.c if (host->dma_ops->init && host->dma_ops->start && start 239 drivers/mmc/host/dw_mmc.h int (*start)(struct dw_mci *host, unsigned int sg_len); start 292 drivers/mmc/host/jz4740_mmc.c conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO; start 295 drivers/mmc/host/jz4740_mmc.c conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO; start 708 drivers/mmc/host/meson-gx-mmc.c u32 start; start 736 drivers/mmc/host/meson-gx-mmc.c start = host->descs_dma_addr | START_DESC_BUSY; start 737 drivers/mmc/host/meson-gx-mmc.c writel(start, host->regs + SD_EMMC_START); start 903 drivers/mmc/host/meson-gx-mmc.c u32 start = readl(host->regs + SD_EMMC_START); start 905 drivers/mmc/host/meson-gx-mmc.c start &= ~START_DESC_BUSY; start 906 drivers/mmc/host/meson-gx-mmc.c writel(start, host->regs + SD_EMMC_START); start 1181 drivers/mmc/host/meson-gx-mmc.c host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF; start 187 drivers/mmc/host/mmc_spi.c unsigned long start = jiffies; start 202 drivers/mmc/host/mmc_spi.c if (time_is_before_jiffies(start + timeout)) start 209 drivers/mmc/host/mmc_spi.c if (time_is_before_jiffies(start + 1)) start 1887 drivers/mmc/host/mmci.c host->phybase = dev->res.start; start 2048 drivers/mmc/host/mmci.c amba_rev(dev), (unsigned long long)dev->res.start, start 607 drivers/mmc/host/moxart-mmc.c host->reg_phys = res_mmc.start; start 385 drivers/mmc/host/mtk-sd.c u8 start; start 1720 drivers/mmc/host/mtk-sd.c int start = 0, len = 0; start 1731 drivers/mmc/host/mtk-sd.c while (start < PAD_DELAY_MAX) { start 1732 drivers/mmc/host/mtk-sd.c len = get_delay_len(delay, start); start 1734 drivers/mmc/host/mtk-sd.c start_final = start; start 1737 drivers/mmc/host/mtk-sd.c start += len ? 
len : 1; start 1751 drivers/mmc/host/mtk-sd.c delay_phase.start = start_final; start 1819 drivers/mmc/host/mtk-sd.c (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) start 1844 drivers/mmc/host/mtk-sd.c if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4) start 1945 drivers/mmc/host/mtk-sd.c (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) start 2003 drivers/mmc/host/mtk-sd.c (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) start 1028 drivers/mmc/host/mxcmmc.c host->phys_base = res->start; start 1128 drivers/mmc/host/mxcmmc.c host->dmareq = res->start; start 1374 drivers/mmc/host/omap.c host->phys_base = res->start; start 1871 drivers/mmc/host/omap_hsmmc.c host->mapbase = res->start + pdata->reg_offset; start 183 drivers/mmc/host/pxamci.c config.src_addr = host->res->start + MMC_RXFIFO; start 184 drivers/mmc/host/pxamci.c config.dst_addr = host->res->start + MMC_TXFIFO; start 855 drivers/mmc/host/renesas_sdhi_core.c (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), start 289 drivers/mmc/host/renesas_sdhi_internal_dmac.c .start = renesas_sdhi_internal_dmac_start_dma, start 368 drivers/mmc/host/renesas_sdhi_sys_dmac.c cfg.dst_addr = res->start + start 442 drivers/mmc/host/renesas_sdhi_sys_dmac.c .start = renesas_sdhi_sys_dmac_start_dma, start 62 drivers/mmc/host/rtsx_pci_sdmmc.c static void dump_reg_range(struct realtek_pci_sdmmc *host, u16 start, u16 end) start 64 drivers/mmc/host/rtsx_pci_sdmmc.c u16 len = end - start + 1; start 74 drivers/mmc/host/rtsx_pci_sdmmc.c rtsx_pci_read_register(host->pcr, start + i + j, start 77 drivers/mmc/host/rtsx_pci_sdmmc.c start + i, n, data); start 651 drivers/mmc/host/rtsx_pci_sdmmc.c int start = 0, len = 0; start 660 drivers/mmc/host/rtsx_pci_sdmmc.c while (start < RTSX_PHASE_MAX) { start 661 drivers/mmc/host/rtsx_pci_sdmmc.c len = sd_get_phase_len(phase_map, start); start 663 drivers/mmc/host/rtsx_pci_sdmmc.c start_final = start; start 666 drivers/mmc/host/rtsx_pci_sdmmc.c start += len ? len : 1; start 630 drivers/mmc/host/rtsx_usb_sdmmc.c int start = 0, len = 0; start 639 drivers/mmc/host/rtsx_usb_sdmmc.c while (start < MAX_PHASE + 1) { start 640 drivers/mmc/host/rtsx_usb_sdmmc.c len = get_phase_len(phase_map, start); start 642 drivers/mmc/host/rtsx_usb_sdmmc.c start_final = start; start 645 drivers/mmc/host/rtsx_usb_sdmmc.c start += len ? 
len : 1; start 1075 drivers/mmc/host/s3cmci.c .src_addr = host->mem->start + host->sdidata, start 1076 drivers/mmc/host/s3cmci.c .dst_addr = host->mem->start + host->sdidata, start 1599 drivers/mmc/host/s3cmci.c host->mem = request_mem_region(host->mem->start, start 1608 drivers/mmc/host/s3cmci.c host->base = ioremap(host->mem->start, resource_size(host->mem)); start 1727 drivers/mmc/host/s3cmci.c release_mem_region(host->mem->start, resource_size(host->mem)); start 1777 drivers/mmc/host/s3cmci.c release_mem_region(host->mem->start, resource_size(host->mem)); start 717 drivers/mmc/host/sdhci-acpi.c if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) start 741 drivers/mmc/host/sdhci-acpi.c host->ioaddr = devm_ioremap_nocache(dev, iomem->start, start 136 drivers/mmc/host/sdhci-of-aspeed.c if (res->start < dev->parent->res->start) start 139 drivers/mmc/host/sdhci-of-aspeed.c delta = res->start - dev->parent->res->start; start 90 drivers/mmc/host/sdhci-pxav3.c regs = ioremap(res->start, resource_size(res)); start 80 drivers/mmc/host/sdhci-sirf.c int start = -1, end = 0, tuning_value = -1, range = 0; start 100 drivers/mmc/host/sdhci-sirf.c if (start == -1) start 101 drivers/mmc/host/sdhci-sirf.c start = phase; start 106 drivers/mmc/host/sdhci-sirf.c tuning_value = (start + end) / 2; start 111 drivers/mmc/host/sdhci-sirf.c tuning_value = (start + end) / 2; start 114 drivers/mmc/host/sdhci-sirf.c start = -1; start 411 drivers/mmc/host/sh_mmcif.c cfg.src_addr = res->start + MMCIF_CE_DATA; start 414 drivers/mmc/host/sh_mmcif.c cfg.dst_addr = res->start + MMCIF_CE_DATA; start 114 drivers/mmc/host/tmio_mmc.h void (*start)(struct tmio_mmc_host *host, struct mmc_data *data); start 57 drivers/mmc/host/tmio_mmc_core.c host->dma_ops->start(host, data); start 224 drivers/mmc/host/uniphier-sd.c .start = uniphier_sd_external_dma_start, start 349 drivers/mmc/host/uniphier-sd.c .start = uniphier_sd_internal_dma_start, start 672 drivers/mmc/host/usdhi6rol0.c static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) start 688 drivers/mmc/host/usdhi6rol0.c cfg.dst_addr = start + USDHI6_SD_BUF0; start 1836 drivers/mmc/host/usdhi6rol0.c usdhi6_dma_request(host, res->start); start 914 drivers/mmc/host/wmt-sdmmc.c release_mem_region(res->start, resource_size(res)); start 780 drivers/mtd/chips/cfi_cmdset_0001.c chip->start += j << partshift; start 969 drivers/mtd/chips/cfi_cmdset_0001.c ret = chip_ready(map, contender, contender->start, mode); start 985 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, contender, contender->start); start 1035 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, loaner, loaner->start); start 1144 drivers/mtd/chips/cfi_cmdset_0001.c unsigned long usec, suspended, start, done; start 1147 drivers/mtd/chips/cfi_cmdset_0001.c start = xip_currtime(); start 1231 drivers/mtd/chips/cfi_cmdset_0001.c start = xip_currtime(); start 1241 drivers/mtd/chips/cfi_cmdset_0001.c done = xip_elapsed_since(start); start 1358 drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 1397 drivers/mtd/chips/cfi_cmdset_0001.c *virt = map->virt + cfi->chips[chipnum].start + ofs; start 1399 drivers/mtd/chips/cfi_cmdset_0001.c *phys = map->phys + cfi->chips[chipnum].start + ofs; start 1409 drivers/mtd/chips/cfi_cmdset_0001.c last_end = cfi->chips[chipnum].start; start 1410 drivers/mtd/chips/cfi_cmdset_0001.c else if (cfi->chips[chipnum].start != last_end) start 1468 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, chip->start); start 1485 
drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 1555 drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 1716 drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 1939 drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 2039 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, chip->start, FL_SYNCING); start 2076 drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 2108 drivers/mtd/chips/cfi_cmdset_0001.c adr += chip->start; start 2229 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); start 2236 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); start 2238 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, chip->start); start 2240 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x90), chip->start); start 2243 drivers/mtd/chips/cfi_cmdset_0001.c map_copy_from(map, buf, chip->start + offset, size); start 2244 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, chip->start); start 2247 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); start 2249 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, chip->start); start 2382 drivers/mtd/chips/cfi_cmdset_0001.c otpinfo->start = from; start 2529 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xFF), cfi->chips[i].start); start 2616 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xFF), cfi->chips[i].start); start 2642 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); start 2644 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xff), chip->start); start 2646 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, chip->start); start 135 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, start 824 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, start 865 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, start 1067 drivers/mtd/chips/cfi_cmdset_0002.c unsigned long suspended, start = xip_currtime(); start 1086 drivers/mtd/chips/cfi_cmdset_0002.c usec -= xip_elapsed_since(start); start 1139 drivers/mtd/chips/cfi_cmdset_0002.c start = xip_currtime(); start 1150 drivers/mtd/chips/cfi_cmdset_0002.c && xip_elapsed_since(start) < usec); start 1214 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; start 1285 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 1287 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 1289 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, start 1292 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); start 1300 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 1302 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 1304 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, start 1306 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, start 1309 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); start 1334 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; 
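
Nearly every cfi_cmdset_0002.c entry in this stretch repeats one three-beat sequence: 0xAA written to the first unlock address, 0x55 to the second, then the actual command byte, with each cycle rebased by chip->start so it lands in the right chip's window of the shared map (hence the recurring 'adr += chip->start' lines). A schematic sketch of that sequence with stand-in helpers (flash_write here is a fake bus write, and 0x555/0x2aa are just the classic JEDEC offsets), not the driver's cfi_send_gen_cmd()/map_word machinery:

    #include <stdint.h>
    #include <stdio.h>

    #define UNLOCK_ADDR1 0x555   /* classic JEDEC unlock offsets; real offsets */
    #define UNLOCK_ADDR2 0x2aa   /* depend on device width and interleave */

    /* Stand-in for a write cycle into the memory-mapped flash window. */
    static void flash_write(uint32_t map_addr, uint8_t val)
    {
        printf("write 0x%02x -> 0x%08x\n", val, map_addr);
    }

    /*
     * Issue one JEDEC-style command to the chip whose window starts at
     * chip_start: every cycle is rebased into the shared map, mirroring the
     * 'adr += chip->start' lines indexed above.
     */
    static void issue_cmd(uint32_t chip_start, uint8_t cmd, uint32_t adr)
    {
        flash_write(chip_start + UNLOCK_ADDR1, 0xAA);   /* unlock cycle 1 */
        flash_write(chip_start + UNLOCK_ADDR2, 0x55);   /* unlock cycle 2 */
        flash_write(chip_start + adr, cmd);             /* the command itself */
    }

    int main(void)
    {
        uint32_t chip_start = 0x00400000;   /* e.g. second chip in a two-chip map */

        /* Sector erase: unlock, 0x80 setup, unlock again, 0x30 at the sector. */
        issue_cmd(chip_start, 0x80, UNLOCK_ADDR1);
        issue_cmd(chip_start, 0x30, 0x20000);
        return 0;
    }
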
start 1434 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, chip->start, FL_LOCKING); start 1442 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 1444 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 1446 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, start 1457 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xA0), chip->start); start 1458 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(lockreg), chip->start); start 1475 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x90), chip->start); start 1476 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), chip->start); start 1479 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, chip->start); start 1511 drivers/mtd/chips/cfi_cmdset_0002.c base = chip->start; start 1545 drivers/mtd/chips/cfi_cmdset_0002.c chip->start, map, cfi, start 1548 drivers/mtd/chips/cfi_cmdset_0002.c chip->start, map, cfi, start 1551 drivers/mtd/chips/cfi_cmdset_0002.c chip->start, map, cfi, start 1556 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x90), chip->start); start 1557 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), chip->start); start 1558 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, chip->start); start 1578 drivers/mtd/chips/cfi_cmdset_0002.c otpinfo->start = from; start 1667 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 1668 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 1669 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 1784 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 1803 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; start 1832 drivers/mtd/chips/cfi_cmdset_0002.c chipstart = cfi->chips[chipnum].start; start 1904 drivers/mtd/chips/cfi_cmdset_0002.c chipstart = cfi->chips[chipnum].start; start 2011 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 2013 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 2015 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, start 2034 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; start 2053 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2054 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 2208 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 2246 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; start 2270 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2271 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 2272 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2285 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 2323 
drivers/mtd/chips/cfi_cmdset_0002.c chipstart = cfi->chips[chipnum].start; start 2384 drivers/mtd/chips/cfi_cmdset_0002.c chipstart = cfi->chips[chipnum].start; start 2435 drivers/mtd/chips/cfi_cmdset_0002.c __func__, chip->start); start 2442 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2443 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 2444 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2445 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2446 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 2447 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2497 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 2524 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; start 2541 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2542 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 2543 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2544 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); start 2545 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); start 2596 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 2642 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); start 2649 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 2651 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 2653 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, start 2655 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 2657 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 2659 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x40), chip->start + adr); start 2662 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr + chip->start); start 2677 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); start 2684 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 2689 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr + chip->start); start 2729 drivers/mtd/chips/cfi_cmdset_0002.c adr += chip->start; start 2739 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, start 2741 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, start 2744 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, start 2757 
drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x80), chip->start); start 2758 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x30), chip->start); start 2784 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x90), chip->start); start 2785 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), chip->start); start 3044 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 3074 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); start 3076 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); start 3078 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, chip->start); start 257 drivers/mtd/chips/cfi_cmdset_0020.c adr += chip->start; start 435 drivers/mtd/chips/cfi_cmdset_0020.c adr += chip->start; start 743 drivers/mtd/chips/cfi_cmdset_0020.c adr += chip->start; start 1041 drivers/mtd/chips/cfi_cmdset_0020.c adr += chip->start; start 1188 drivers/mtd/chips/cfi_cmdset_0020.c adr += chip->start; start 127 drivers/mtd/chips/cfi_probe.c unsigned long start; start 132 drivers/mtd/chips/cfi_probe.c start = i << cfi->chipshift; start 135 drivers/mtd/chips/cfi_probe.c if (cfi_qry_present(map, start, cfi)) { start 138 drivers/mtd/chips/cfi_probe.c cfi_qry_mode_off(start, map, cfi); start 141 drivers/mtd/chips/cfi_probe.c if (!cfi_qry_present(map, start, cfi)) { start 144 drivers/mtd/chips/cfi_probe.c map->name, base, start); start 156 drivers/mtd/chips/cfi_probe.c map->name, base, start); start 37 drivers/mtd/chips/fwh_lock.h if (chip->start < 0x400000) { start 39 drivers/mtd/chips/fwh_lock.h __func__, chip->start ); start 54 drivers/mtd/chips/fwh_lock.h adr += chip->start - 0x400000; start 153 drivers/mtd/chips/gen_probe.c pchip->start = (i << cfi.chipshift); start 2230 drivers/mtd/chips/jedec_probe.c unsigned long start; start 2234 drivers/mtd/chips/jedec_probe.c start = i << cfi->chipshift; start 2235 drivers/mtd/chips/jedec_probe.c if (jedec_read_mfr(map, start, cfi) == cfi->mfr && start 2236 drivers/mtd/chips/jedec_probe.c jedec_read_id(map, start, cfi) == cfi->id) { start 2239 drivers/mtd/chips/jedec_probe.c jedec_reset(start, map, cfi); start 2245 drivers/mtd/chips/jedec_probe.c map->name, base, start); start 2257 drivers/mtd/chips/jedec_probe.c map->name, base, start); start 302 drivers/mtd/devices/bcm47xxsflash.c if (!devm_request_mem_region(dev, res->start, resource_size(res), start 323 drivers/mtd/devices/bcm47xxsflash.c b47s->window = ioremap_nocache(res->start, resource_size(res)); start 325 drivers/mtd/devices/bcm47xxsflash.c b47s->window = ioremap_cache(res->start, resource_size(res)); start 1978 drivers/mtd/devices/docg3.c base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE); start 125 drivers/mtd/devices/ms02-nv.c mod_res->start = addr; start 157 drivers/mtd/devices/ms02-nv.c diag_res->start = addr; start 170 drivers/mtd/devices/ms02-nv.c user_res->start = addr + MS02NV_RAM; start 183 drivers/mtd/devices/ms02-nv.c csr_res->start = addr + MS02NV_CSR; start 443 drivers/mtd/devices/mtd_dataflash.c info->start = 0; start 39 drivers/mtd/devices/phram.c u_char *start = mtd->priv; start 41 drivers/mtd/devices/phram.c memset(start + instr->addr, 0xff, instr->len); start 62 drivers/mtd/devices/phram.c u_char *start = mtd->priv; start 64 drivers/mtd/devices/phram.c memcpy(buf, start + from, len); start 72 drivers/mtd/devices/phram.c u_char *start = mtd->priv; start 74 drivers/mtd/devices/phram.c memcpy(start + to, buf, len); start 91 drivers/mtd/devices/phram.c static int 
register_device(char *name, phys_addr_t start, size_t len) start 101 drivers/mtd/devices/phram.c new->mtd.priv = ioremap(start, len); start 221 drivers/mtd/devices/phram.c uint64_t start; start 244 drivers/mtd/devices/phram.c ret = parse_num64(&start, token[1]); start 256 drivers/mtd/devices/phram.c ret = register_device(name, start, len); start 260 drivers/mtd/devices/phram.c pr_info("%s device: %#llx at %#llx\n", name, len, start); start 123 drivers/mtd/devices/pmc551.c u_char *start; start 210 drivers/mtd/devices/pmc551.c *virt = priv->start + soff_lo; start 743 drivers/mtd/devices/pmc551.c priv->start = pci_iomap(PCI_Device, 0, priv->asize); start 745 drivers/mtd/devices/pmc551.c if (!priv->start) { start 783 drivers/mtd/devices/pmc551.c pci_iounmap(PCI_Device, priv->start); start 795 drivers/mtd/devices/pmc551.c priv->start, priv->start + priv->asize); start 830 drivers/mtd/devices/pmc551.c if (priv->start) { start 832 drivers/mtd/devices/pmc551.c "0x%p\n", priv->asize >> 20, priv->start); start 833 drivers/mtd/devices/pmc551.c pci_iounmap(priv->dev, priv->start); start 56 drivers/mtd/devices/slram.c u_char *start; start 87 drivers/mtd/devices/slram.c memset(priv->start + instr->addr, 0xff, instr->len); start 97 drivers/mtd/devices/slram.c *virt = priv->start + from; start 112 drivers/mtd/devices/slram.c memcpy(buf, priv->start + from, len); start 122 drivers/mtd/devices/slram.c memcpy(priv->start + to, buf, len); start 129 drivers/mtd/devices/slram.c static int register_device(char *name, unsigned long start, unsigned long length) start 161 drivers/mtd/devices/slram.c if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start = start 162 drivers/mtd/devices/slram.c memremap(start, length, start 168 drivers/mtd/devices/slram.c ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start + length; start 186 drivers/mtd/devices/slram.c memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); start 192 drivers/mtd/devices/slram.c (start / 1024), ((start + length) / 1024)); start 194 drivers/mtd/devices/slram.c ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start, start 206 drivers/mtd/devices/slram.c memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); start 430 drivers/mtd/lpddr/lpddr2_nvm.c .phys = add_range->start, start 82 drivers/mtd/lpddr/lpddr_cmds.c chip->start += j << lpddr->chipshift; start 523 drivers/mtd/lpddr/lpddr_cmds.c *mtdbuf = (void *)map->virt + chip->start + ofs; start 533 drivers/mtd/lpddr/lpddr_cmds.c last_end = chip->start; start 534 drivers/mtd/lpddr/lpddr_cmds.c else if (chip->start != last_end) start 147 drivers/mtd/maps/amd76xrom.c window->rsrc.start = window->phys; start 246 drivers/mtd/maps/amd76xrom.c map->rsrc.start = map->map.phys; start 261 drivers/mtd/maps/amd76xrom.c cfi->chips[i].start += offset; start 176 drivers/mtd/maps/ck804xrom.c window->rsrc.start = window->phys; start 276 drivers/mtd/maps/ck804xrom.c map->rsrc.start = map->map.phys; start 291 drivers/mtd/maps/ck804xrom.c cfi->chips[i].start += offset; start 241 drivers/mtd/maps/esb2rom.c window->rsrc.start = window->phys; start 337 drivers/mtd/maps/esb2rom.c map->rsrc.start = map->map.phys; start 352 drivers/mtd/maps/esb2rom.c cfi->chips[i].start += offset; start 176 drivers/mtd/maps/ichxrom.c window->rsrc.start = window->phys; start 273 drivers/mtd/maps/ichxrom.c map->rsrc.start = map->map.phys; start 288 drivers/mtd/maps/ichxrom.c cfi->chips[i].start += offset; start 178 drivers/mtd/maps/ixp4xx.c .origin = dev->resource->start, start 99 drivers/mtd/maps/l440gx.c pm_iobase->start = 0; start 106 
drivers/mtd/maps/l440gx.c pm_iobase->start += iobase & ~1; start 121 drivers/mtd/maps/l440gx.c iobase = pm_iobase->start; start 132 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->phys = ltq_mtd->res->start; start 498 drivers/mtd/maps/pcmciamtd.c link->resource[2]->start = 0; start 512 drivers/mtd/maps/pcmciamtd.c link->resource[2]->start = 0; start 534 drivers/mtd/maps/pcmciamtd.c dev->win_base = ioremap(link->resource[2]->start, start 506 drivers/mtd/maps/physmap-core.c info->maps[i].phys = res->start; start 614 drivers/mtd/maps/physmap-core.c .start = CONFIG_MTD_PHYSMAP_START, start 100 drivers/mtd/maps/pismo.c res.start = base; start 135 drivers/mtd/maps/plat-ram.c (unsigned long long)res->start); start 139 drivers/mtd/maps/plat-ram.c info->map.phys = res->start; start 28 drivers/mtd/maps/pxa2xx-flash.c unsigned long start = (unsigned long)map->cached + from; start 29 drivers/mtd/maps/pxa2xx-flash.c unsigned long end = start + len; start 31 drivers/mtd/maps/pxa2xx-flash.c start &= ~(CACHELINESIZE - 1); start 32 drivers/mtd/maps/pxa2xx-flash.c while (start < end) { start 34 drivers/mtd/maps/pxa2xx-flash.c asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)); start 35 drivers/mtd/maps/pxa2xx-flash.c start += CACHELINESIZE; start 62 drivers/mtd/maps/pxa2xx-flash.c info->map.phys = res->start; start 70 drivers/mtd/maps/rbtx4939-flash.c if (!devm_request_mem_region(&dev->dev, res->start, size, start 75 drivers/mtd/maps/rbtx4939-flash.c info->map.phys = res->start; start 73 drivers/mtd/maps/sa1100-flash.c phys = res->start; start 122 drivers/mtd/maps/scx200_docflash.c docmem.start = base; start 153 drivers/mtd/maps/scx200_docflash.c printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl); start 155 drivers/mtd/maps/scx200_docflash.c pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start); start 178 drivers/mtd/maps/scx200_docflash.c scx200_docflash_map.phys = docmem.start; start 179 drivers/mtd/maps/scx200_docflash.c scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size); start 60 drivers/mtd/maps/sun_uflash.c dp, (unsigned long long)op->resource[0].start); start 80 drivers/mtd/maps/sun_uflash.c up->map.phys = op->resource[0].start; start 349 drivers/mtd/mtdchar.c uint64_t start, uint32_t length, void __user *ptr, start 367 drivers/mtd/mtdchar.c ops.ooboffs = start & (mtd->writesize - 1); start 379 drivers/mtd/mtdchar.c start &= ~((uint64_t)mtd->writesize - 1); start 380 drivers/mtd/mtdchar.c ret = mtd_write_oob(mtd, start, &ops); start 393 drivers/mtd/mtdchar.c uint64_t start, uint32_t length, void __user *ptr, start 404 drivers/mtd/mtdchar.c ops.ooboffs = start & (mtd->writesize - 1); start 416 drivers/mtd/mtdchar.c start &= ~((uint64_t)mtd->writesize - 1); start 417 drivers/mtd/mtdchar.c ret = mtd_read_oob(mtd, start, &ops); start 572 drivers/mtd/mtdchar.c return mtd_add_partition(mtd, p.devname, p.start, p.length); start 626 drivers/mtd/mtdchar.c ret = mtd_write_oob(mtd, (loff_t)req.start, &ops); start 706 drivers/mtd/mtdchar.c erase->addr = einfo64.start; start 716 drivers/mtd/mtdchar.c erase->addr = einfo32.start; start 735 drivers/mtd/mtdchar.c ret = mtdchar_writeoob(file, mtd, buf.start, buf.length, start 749 drivers/mtd/mtdchar.c ret = mtdchar_readoob(file, mtd, buf.start, buf.length, start 750 drivers/mtd/mtdchar.c buf.ptr, &buf_user->start); start 762 drivers/mtd/mtdchar.c ret = mtdchar_writeoob(file, mtd, buf.start, buf.length, start 776 drivers/mtd/mtdchar.c ret = mtdchar_readoob(file, mtd, buf.start, buf.length, 
start 796 drivers/mtd/mtdchar.c ret = mtd_lock(mtd, einfo.start, einfo.length); start 807 drivers/mtd/mtdchar.c ret = mtd_unlock(mtd, einfo.start, einfo.length); start 818 drivers/mtd/mtdchar.c ret = mtd_is_locked(mtd, einfo.start, einfo.length); start 912 drivers/mtd/mtdchar.c ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length); start 1008 drivers/mtd/mtdchar.c u_int32_t start; start 1035 drivers/mtd/mtdchar.c ret = mtdchar_writeoob(file, mtd, buf.start, start 1050 drivers/mtd/mtdchar.c ret = mtdchar_readoob(file, mtd, buf.start, start 1052 drivers/mtd/mtdchar.c &buf_user->start); start 1448 drivers/mtd/mtdcore.c const u8 *oobbuf, int start, int nbytes, start 1456 drivers/mtd/mtdcore.c ret = mtd_ooblayout_find_region(mtd, start, &section, start 1491 drivers/mtd/mtdcore.c u8 *oobbuf, int start, int nbytes, start 1499 drivers/mtd/mtdcore.c ret = mtd_ooblayout_find_region(mtd, start, &section, start 1563 drivers/mtd/mtdcore.c const u8 *oobbuf, int start, int nbytes) start 1565 drivers/mtd/mtdcore.c return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes, start 1583 drivers/mtd/mtdcore.c u8 *oobbuf, int start, int nbytes) start 1585 drivers/mtd/mtdcore.c return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes, start 1603 drivers/mtd/mtdcore.c const u8 *oobbuf, int start, int nbytes) start 1605 drivers/mtd/mtdcore.c return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes, start 1623 drivers/mtd/mtdcore.c u8 *oobbuf, int start, int nbytes) start 1625 drivers/mtd/mtdcore.c return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes, start 44 drivers/mtd/nand/onenand/generic.c if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) { start 49 drivers/mtd/nand/onenand/generic.c info->onenand.base = ioremap(res->start, size); start 76 drivers/mtd/nand/onenand/generic.c release_mem_region(res->start, size); start 91 drivers/mtd/nand/onenand/generic.c release_mem_region(res->start, size); start 489 drivers/mtd/nand/onenand/omap2.c c->phys_base = res->start; start 2504 drivers/mtd/nand/onenand/onenand_base.c int start, end, block, value, status; start 2507 drivers/mtd/nand/onenand/onenand_base.c start = onenand_block(this, ofs); start 2518 drivers/mtd/nand/onenand/onenand_base.c this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS); start 2542 drivers/mtd/nand/onenand/onenand_base.c for (block = start; block < end + 1; block++) { start 3079 drivers/mtd/nand/onenand/onenand_base.c otpinfo->start = from; start 3513 drivers/mtd/nand/onenand/onenand_base.c static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end) start 3527 drivers/mtd/nand/onenand/onenand_base.c printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end); start 3529 drivers/mtd/nand/onenand/onenand_base.c for (block = start; block <= end; block++) { start 750 drivers/mtd/nand/onenand/samsung.c int start, end, start_mem_addr, end_mem_addr; start 752 drivers/mtd/nand/onenand/samsung.c start = ofs >> this->erase_shift; start 753 drivers/mtd/nand/onenand/samsung.c start_mem_addr = onenand->mem_addr(start, 0, 0); start 754 drivers/mtd/nand/onenand/samsung.c end = start + (len >> this->erase_shift) - 1; start 868 drivers/mtd/nand/onenand/samsung.c onenand->phys_base = r->start; start 909 drivers/mtd/nand/onenand/samsung.c err = devm_request_irq(&pdev->dev, r->start, start 1615 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].io.dma = res.start; start 1737 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].io.dma = res->start; start 2174
drivers/mtd/nand/raw/atmel/nand-controller.c nc->sram.dma = res.start; start 359 drivers/mtd/nand/raw/au1550nd.c unsigned long addr, staddr, start, mask, end; start 366 drivers/mtd/nand/raw/au1550nd.c start = (staddr << 4) & 0xfffc0000; start 368 drivers/mtd/nand/raw/au1550nd.c end = (start | (start - 1)) & ~(start ^ mask); start 369 drivers/mtd/nand/raw/au1550nd.c if ((nand_base >= start) && (nand_base < end)) start 401 drivers/mtd/nand/raw/au1550nd.c if (request_mem_region(r->start, resource_size(r), "au1550-nand")) { start 407 drivers/mtd/nand/raw/au1550nd.c ctx->base = ioremap_nocache(r->start, 0x1000); start 419 drivers/mtd/nand/raw/au1550nd.c cs = find_nand_cs(r->start); start 459 drivers/mtd/nand/raw/au1550nd.c release_mem_region(r->start, resource_size(r)); start 472 drivers/mtd/nand/raw/au1550nd.c release_mem_region(r->start, 0x1000); start 735 drivers/mtd/nand/raw/davinci_nand.c base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2)); start 879 drivers/mtd/nand/raw/fsl_elbc_nand.c == fsl_lbc_addr(res.start)) start 914 drivers/mtd/nand/raw/fsl_elbc_nand.c priv->vbase = ioremap(res.start, resource_size(&res)); start 922 drivers/mtd/nand/raw/fsl_elbc_nand.c mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); start 944 drivers/mtd/nand/raw/fsl_elbc_nand.c (unsigned long long)res.start, priv->bank); start 1000 drivers/mtd/nand/raw/fsl_ifc_nand.c if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) start 1038 drivers/mtd/nand/raw/fsl_ifc_nand.c priv->vbase = ioremap(res.start, resource_size(&res)); start 1059 drivers/mtd/nand/raw/fsl_ifc_nand.c mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); start 1081 drivers/mtd/nand/raw/fsl_ifc_nand.c (unsigned long long)res.start, priv->bank); start 181 drivers/mtd/nand/raw/fsl_upm.c mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%pOFn", (u64)io_res->start, start 220 drivers/mtd/nand/raw/fsl_upm.c ret = fsl_upm_find(io_res.start, &fun->upm); start 288 drivers/mtd/nand/raw/fsl_upm.c fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, start 998 drivers/mtd/nand/raw/fsmc_nand.c host->data_pa = (dma_addr_t)res->start; start 143 drivers/mtd/nand/raw/gpio.c r->start = addr; start 144 drivers/mtd/nand/raw/gpio.c r->end = r->start + 0x3; start 1130 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this); start 694 drivers/mtd/nand/raw/lpc32xx_mlc.c host->io_base_phy = rc->start; start 834 drivers/mtd/nand/raw/lpc32xx_slc.c host->io_base_dma = rc->start; start 2759 drivers/mtd/nand/raw/marvell_nand.c config.src_addr = r->start + NDDB; start 2760 drivers/mtd/nand/raw/marvell_nand.c config.dst_addr = r->start + NDDB; start 667 drivers/mtd/nand/raw/mpc5121_nfc.c regs_paddr = res.start; start 650 drivers/mtd/nand/raw/mtk_nand.c u32 start, end; start 653 drivers/mtd/nand/raw/mtk_nand.c start = offset / chip->ecc.size; start 661 drivers/mtd/nand/raw/mtk_nand.c if (start > i || i >= end) start 699 drivers/mtd/nand/raw/mtk_nand.c static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start, start 710 drivers/mtd/nand/raw/mtk_nand.c oobptr = oob_ptr(chip, start + i); start 884 drivers/mtd/nand/raw/mtk_nand.c static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start, start 898 drivers/mtd/nand/raw/mtk_nand.c memset(oob_ptr(chip, start + i), 0xff, reg_size); start 916 drivers/mtd/nand/raw/mtk_nand.c u32 column, sectors, start, end, reg; start 923 drivers/mtd/nand/raw/mtk_nand.c start = data_offs / chip->ecc.size; start 926 
drivers/mtd/nand/raw/mtk_nand.c sectors = end - start; start 927 drivers/mtd/nand/raw/mtk_nand.c column = start * (chip->ecc.size + spare); start 930 drivers/mtd/nand/raw/mtk_nand.c buf = bufpoi + start * chip->ecc.size; start 988 drivers/mtd/nand/raw/mtk_nand.c mtk_nfc_update_ecc_stats(mtd, buf, start, sectors); start 989 drivers/mtd/nand/raw/mtk_nand.c mtk_nfc_read_fdm(chip, start, sectors); start 999 drivers/mtd/nand/raw/mtk_nand.c if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec) start 2219 drivers/mtd/nand/raw/omap2.c info->phys_base = res->start; start 404 drivers/mtd/nand/raw/omap_elm.c ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0, start 2938 drivers/mtd/nand/raw/qcom_nandc.c nandc->base_phys = res->start; start 2939 drivers/mtd/nand/raw/qcom_nandc.c nandc->base_dma = dma_map_resource(dev, res->start, start 2974 drivers/mtd/nand/raw/qcom_nandc.c dma_unmap_resource(dev, res->start, resource_size(res), start 1129 drivers/mtd/nand/raw/sh_flctl.c flctl->fifo = res->start + 0x24; /* FLDTFIFO */ start 129 drivers/mtd/nand/raw/sharpsl.c sharpsl->io = ioremap(r->start, resource_size(r)); start 1889 drivers/mtd/nand/raw/stm32_fmc2_nand.c fmc2->io_phys_addr = res->start; start 1901 drivers/mtd/nand/raw/stm32_fmc2_nand.c fmc2->data_phys_addr[chip_cs] = res->start; start 1776 drivers/mtd/nand/raw/sunxi_nand.c unsigned int i, j, remaining, start; start 1799 drivers/mtd/nand/raw/sunxi_nand.c start = nand_subop_get_addr_start_off(subop, i); start 1800 drivers/mtd/nand/raw/sunxi_nand.c for (j = 0; j < 8 && j + start < remaining; j++) { start 1801 drivers/mtd/nand/raw/sunxi_nand.c u32 addr = instr->ctx.addr.addrs[j + start]; start 1813 drivers/mtd/nand/raw/sunxi_nand.c start = nand_subop_get_data_start_off(subop, i); start 1821 drivers/mtd/nand/raw/sunxi_nand.c instr->ctx.data.buf.out + start, start 1824 drivers/mtd/nand/raw/sunxi_nand.c inbuf = instr->ctx.data.buf.in + start; start 2132 drivers/mtd/nand/raw/sunxi_nand.c dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data; start 388 drivers/mtd/nand/raw/tmio_nand.c tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr)); start 392 drivers/mtd/nand/raw/tmio_nand.c tmio->fcr_base = fcr->start & 0xfffff; start 393 drivers/mtd/nand/raw/tmio_nand.c tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr)); start 283 drivers/mtd/parsers/sharpslpart.c __le32 start; start 313 drivers/mtd/parsers/sharpslpart.c if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) || start 314 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) || start 315 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) || start 316 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) || start 317 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) { start 373 drivers/mtd/parsers/sharpslpart.c sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start); start 375 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[0].start); start 378 drivers/mtd/parsers/sharpslpart.c sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start); start 380 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[1].start); start 383 drivers/mtd/parsers/sharpslpart.c sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start); start 385 drivers/mtd/parsers/sharpslpart.c le32_to_cpu(buf[2].start); start 195 drivers/mtd/spi-nor/aspeed-smc.c #define SEGMENT_ADDR_VALUE(start, end) \ start 196 drivers/mtd/spi-nor/aspeed-smc.c 
(((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24)) start 449 drivers/mtd/spi-nor/aspeed-smc.c offset = SEGMENT_ADDR_START(reg) - res->start; start 462 drivers/mtd/spi-nor/aspeed-smc.c static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start, start 480 drivers/mtd/spi-nor/aspeed-smc.c size = SEGMENT_ADDR_END(seg_oldval) - start; start 486 drivers/mtd/spi-nor/aspeed-smc.c if (start + size > ahb_base_phy + controller->ahb_window_size) { start 487 drivers/mtd/spi-nor/aspeed-smc.c size = ahb_base_phy + controller->ahb_window_size - start; start 492 drivers/mtd/spi-nor/aspeed-smc.c end = start + size; start 493 drivers/mtd/spi-nor/aspeed-smc.c seg_newval = SEGMENT_ADDR_VALUE(start, end); start 504 drivers/mtd/spi-nor/aspeed-smc.c start = SEGMENT_ADDR_START(seg_oldval); start 506 drivers/mtd/spi-nor/aspeed-smc.c size = end - start; start 510 drivers/mtd/spi-nor/aspeed-smc.c cs, start, end, size >> 20); start 526 drivers/mtd/spi-nor/aspeed-smc.c u32 ahb_base_phy, start; start 568 drivers/mtd/spi-nor/aspeed-smc.c start = SEGMENT_ADDR_END(prev); start 570 drivers/mtd/spi-nor/aspeed-smc.c start = ahb_base_phy; start 573 drivers/mtd/spi-nor/aspeed-smc.c size = chip_set_segment(chip, chip->cs, start, size); start 576 drivers/mtd/spi-nor/aspeed-smc.c chip->ahb_base = controller->ahb_base + (start - ahb_base_phy); start 584 drivers/mtd/spi-nor/aspeed-smc.c chip_set_segment(chip, chip->cs + 1, start + size, 0); start 1362 drivers/mtd/spi-nor/cadence-quadspi.c cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start; start 41 drivers/mtd/tests/speedtest.c static ktime_t start, finish; start 153 drivers/mtd/tests/speedtest.c start = ktime_get(); start 166 drivers/mtd/tests/speedtest.c ms = ktime_ms_delta(finish, start); start 71 drivers/mtd/tests/torturetest.c static ktime_t start, finish; start 77 drivers/mtd/tests/torturetest.c start = ktime_get(); start 325 drivers/mtd/tests/torturetest.c ms = ktime_ms_delta(finish, start); start 366 drivers/mtd/tests/torturetest.c static void print_bufs(unsigned char *read, unsigned char *written, int start, start 417 drivers/mtd/tests/torturetest.c static void print_bufs(unsigned char *read, unsigned char *written, int start, start 425 drivers/mtd/tests/torturetest.c printk("0x%08x: ", start + i); start 428 drivers/mtd/tests/torturetest.c printk(" %02x", read[start + i + j1]); start 429 drivers/mtd/tests/torturetest.c if (read[start + i + j1] != written[start + i + j1]) start 441 drivers/mtd/tests/torturetest.c printk(" %02x", written[start + i + j2]); start 1375 drivers/mtd/ubi/attach.c int start) start 1392 drivers/mtd/ubi/attach.c for (pnum = start; pnum < ubi->peb_count; pnum++) { start 281 drivers/mtd/ubi/block.c geo->start = 0; start 157 drivers/mtd/ubi/cdev.c static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, start 453 drivers/mtd/ubi/debug.c .start = eraseblk_count_seq_start, start 204 drivers/net/arcnet/com20020_cs.c (unsigned int)link->resource[0]->start); start 209 drivers/net/arcnet/com20020_cs.c if (!link->resource[0]->start) { start 211 drivers/net/arcnet/com20020_cs.c link->resource[0]->start = ioaddr; start 225 drivers/net/arcnet/com20020_cs.c ioaddr = dev->base_addr = link->resource[0]->start; start 256 drivers/net/bonding/bond_procfs.c .start = bond_info_seq_start, start 1298 drivers/net/can/at91_can.c if (!request_mem_region(res->start, start 1305 drivers/net/can/at91_can.c addr = ioremap_nocache(res->start, resource_size(res)); start 1361 drivers/net/can/at91_can.c release_mem_region(res->start, 
resource_size(res)); start 1379 drivers/net/can/at91_can.c release_mem_region(res->start, resource_size(res)); start 173 drivers/net/can/c_can/c_can.h u8 start; start 103 drivers/net/can/c_can/c_can_platform.c mask = 1 << raminit->bits.start | 1 << raminit->bits.done; start 119 drivers/net/can/c_can/c_can_platform.c c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl); start 123 drivers/net/can/c_can/c_can_platform.c ctrl |= 1 << raminit->bits.start; start 131 drivers/net/can/c_can/c_can_platform.c ctrl &= ~(1 << raminit->bits.start); start 201 drivers/net/can/c_can/c_can_platform.c [0] = { .start = 3, .done = 1, }, start 202 drivers/net/can/c_can/c_can_platform.c [1] = { .start = 5, .done = 2, }, start 213 drivers/net/can/c_can/c_can_platform.c [0] = { .start = 0, .done = 8, }, start 214 drivers/net/can/c_can/c_can_platform.c [1] = { .start = 1, .done = 9, }, start 174 drivers/net/can/cc770/cc770_platform.c if (!request_mem_region(mem->start, mem_size, pdev->name)) start 177 drivers/net/can/cc770/cc770_platform.c base = ioremap(mem->start, mem_size); start 228 drivers/net/can/cc770/cc770_platform.c release_mem_region(mem->start, mem_size); start 244 drivers/net/can/cc770/cc770_platform.c release_mem_region(mem->start, resource_size(mem)); start 1522 drivers/net/can/janz-ican3.c unsigned long start; start 1538 drivers/net/can/janz-ican3.c start = jiffies; start 1546 drivers/net/can/janz-ican3.c } while (time_before(jiffies, start + HZ / 2)); start 1953 drivers/net/can/janz-ican3.c mod->dpm = ioremap(res->start, resource_size(res)); start 1970 drivers/net/can/janz-ican3.c mod->ctrl = ioremap(res->start, resource_size(res)); start 1692 drivers/net/can/m_can/m_can.c int end, i, start; start 1697 drivers/net/can/m_can/m_can.c start = cdev->mcfg[MRAM_SIDF].off; start 1701 drivers/net/can/m_can/m_can.c for (i = start; i < end; i += 4) start 92 drivers/net/can/m_can/m_can_platform.c mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 717 drivers/net/can/rcar/rcar_canfd.c int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; start 721 drivers/net/can/rcar/rcar_canfd.c start = 0; /* Channel 0 always starts from 0th rule */ start 725 drivers/net/can/rcar/rcar_canfd.c start = RCANFD_GAFLCFG_GETRNC(0, cfg); start 729 drivers/net/can/rcar/rcar_canfd.c page = RCANFD_GAFL_PAGENUM(start); start 743 drivers/net/can/rcar/rcar_canfd.c rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); start 745 drivers/net/can/rcar/rcar_canfd.c rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); start 747 drivers/net/can/rcar/rcar_canfd.c rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); start 749 drivers/net/can/rcar/rcar_canfd.c rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start), start 270 drivers/net/can/sja1000/ems_pcmcia.c dev->resource[2]->start = dev->resource[2]->end = 0; start 293 drivers/net/can/sja1000/ems_pcmcia.c ems_pcmcia_add_card(dev, dev->resource[2]->start); start 664 drivers/net/can/sja1000/peak_pcmcia.c card->ioport_addr = ioport_map(pdev->resource[0]->start, start 228 drivers/net/can/sja1000/sja1000_platform.c if (!devm_request_mem_region(&pdev->dev, res_mem->start, start 232 drivers/net/can/sja1000/sja1000_platform.c addr = devm_ioremap_nocache(&pdev->dev, res_mem->start, start 257 drivers/net/can/sja1000/sja1000_platform.c irq = res_irq->start; start 275 drivers/net/can/softing/softing_cs.c pdev->resource[0].start = pres->start; start 279 drivers/net/can/softing/softing_cs.c pdev->resource[1].start = 
pcmcia->irq; start 280 drivers/net/can/softing/softing_cs.c pdev->resource[1].end = pdev->resource[1].start; start 778 drivers/net/can/softing/softing_main.c card->dpram_phys = pres->start; start 788 drivers/net/can/softing/softing_main.c card->irq.nr = pres->start; start 921 drivers/net/can/ti_hecc.c ndev->irq = irq->start; start 432 drivers/net/can/usb/ems_usb.c u8 msg_count, start; start 436 drivers/net/can/usb/ems_usb.c start = CPC_HEADER_SIZE; start 439 drivers/net/can/usb/ems_usb.c msg = (struct ems_cpc_msg *)&ibuf[start]; start 465 drivers/net/can/usb/ems_usb.c start += CPC_MSG_HEADER_LEN + msg->length; start 468 drivers/net/can/usb/ems_usb.c if (start > urb->transfer_buffer_length) { start 140 drivers/net/dsa/bcm_sf2_cfp.c unsigned int start) start 145 drivers/net/dsa/bcm_sf2_cfp.c for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) { start 17 drivers/net/dsa/sja1105/sja1105_static_config.c void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len) start 19 drivers/net/dsa/sja1105/sja1105_static_config.c int rc = packing(buf, (u64 *)val, start, end, len, start 27 drivers/net/dsa/sja1105/sja1105_static_config.c start, end); start 29 drivers/net/dsa/sja1105/sja1105_static_config.c if ((start - end + 1) > 64) start 31 drivers/net/dsa/sja1105/sja1105_static_config.c start, end); start 34 drivers/net/dsa/sja1105/sja1105_static_config.c *val, start, end); start 39 drivers/net/dsa/sja1105/sja1105_static_config.c void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len) start 41 drivers/net/dsa/sja1105/sja1105_static_config.c int rc = packing((void *)buf, val, start, end, len, start 49 drivers/net/dsa/sja1105/sja1105_static_config.c start, end); start 52 drivers/net/dsa/sja1105/sja1105_static_config.c start, end); start 56 drivers/net/dsa/sja1105/sja1105_static_config.c void sja1105_packing(void *buf, u64 *val, int start, int end, start 59 drivers/net/dsa/sja1105/sja1105_static_config.c int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST); start 66 drivers/net/dsa/sja1105/sja1105_static_config.c start, end); start 68 drivers/net/dsa/sja1105/sja1105_static_config.c if ((start - end + 1) > 64) start 70 drivers/net/dsa/sja1105/sja1105_static_config.c start, end); start 73 drivers/net/dsa/sja1105/sja1105_static_config.c *val, start, end); start 331 drivers/net/dsa/sja1105/sja1105_static_config.h void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len); start 332 drivers/net/dsa/sja1105/sja1105_static_config.h void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len); start 333 drivers/net/dsa/sja1105/sja1105_static_config.h void sja1105_packing(void *buf, u64 *val, int start, int end, start 68 drivers/net/dummy.c unsigned int start; start 72 drivers/net/dummy.c start = u64_stats_fetch_begin_irq(&dstats->syncp); start 75 drivers/net/dummy.c } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); start 322 drivers/net/ethernet/3com/3c574_cs.c link->resource[0]->start = j ^ 0x300; start 339 drivers/net/ethernet/3com/3c574_cs.c dev->base_addr = link->resource[0]->start; start 262 drivers/net/ethernet/3com/3c589_cs.c link->resource[0]->start = j ^ 0x300; start 279 drivers/net/ethernet/3com/3c589_cs.c dev->base_addr = link->resource[0]->start; start 816 drivers/net/ethernet/8390/ax88796.c release_mem_region(mem->start, resource_size(mem)); start 821 drivers/net/ethernet/8390/ax88796.c release_mem_region(mem->start, resource_size(mem)); start 868 drivers/net/ethernet/8390/ax88796.c 
dev->irq = irq->start; start 895 drivers/net/ethernet/8390/ax88796.c if (!request_mem_region(mem->start, mem_size, pdev->name)) { start 901 drivers/net/ethernet/8390/ax88796.c ei_local->mem = ioremap(mem->start, mem_size); start 921 drivers/net/ethernet/8390/ax88796.c if (!request_mem_region(mem2->start, mem2_size, pdev->name)) { start 927 drivers/net/ethernet/8390/ax88796.c ax->map2 = ioremap(mem2->start, mem2_size); start 949 drivers/net/ethernet/8390/ax88796.c release_mem_region(mem2->start, mem2_size); start 955 drivers/net/ethernet/8390/ax88796.c release_mem_region(mem->start, mem_size); start 244 drivers/net/ethernet/8390/axnet_cs.c if (link->resource[0]->start == 0) { start 246 drivers/net/ethernet/8390/axnet_cs.c link->resource[0]->start = j ^ 0x300; start 247 drivers/net/ethernet/8390/axnet_cs.c link->resource[1]->start = (j ^ 0x300) + 0x10; start 297 drivers/net/ethernet/8390/axnet_cs.c dev->base_addr = link->resource[0]->start; start 88 drivers/net/ethernet/8390/hydra.c if (!request_mem_region(z->resource.start, 0x10000, "Hydra")) start 91 drivers/net/ethernet/8390/hydra.c release_mem_region(z->resource.start, 0x10000); start 115 drivers/net/ethernet/8390/hydra.c unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start); start 425 drivers/net/ethernet/8390/mcf8390.c if (!request_mem_region(mem->start, msize, pdev->name)) start 430 drivers/net/ethernet/8390/mcf8390.c release_mem_region(mem->start, msize); start 437 drivers/net/ethernet/8390/mcf8390.c dev->irq = irq->start; start 438 drivers/net/ethernet/8390/mcf8390.c dev->base_addr = mem->start; start 442 drivers/net/ethernet/8390/mcf8390.c release_mem_region(mem->start, msize); start 457 drivers/net/ethernet/8390/mcf8390.c release_mem_region(mem->start, resource_size(mem)); start 800 drivers/net/ethernet/8390/ne.c dev->base_addr = res->start; start 285 drivers/net/ethernet/8390/pcnet_cs.c link->resource[2]->start = 0; link->resource[2]->end = 0; start 290 drivers/net/ethernet/8390/pcnet_cs.c virt = ioremap(link->resource[2]->start, start 462 drivers/net/ethernet/8390/pcnet_cs.c if (link->resource[0]->start == 0) { start 464 drivers/net/ethernet/8390/pcnet_cs.c link->resource[0]->start = j ^ 0x300; start 465 drivers/net/ethernet/8390/pcnet_cs.c link->resource[1]->start = (j ^ 0x300) + 0x10; start 526 drivers/net/ethernet/8390/pcnet_cs.c dev->base_addr = link->resource[0]->start; start 1417 drivers/net/ethernet/8390/pcnet_cs.c link->resource[3]->start = 0; link->resource[3]->end = window_size; start 1429 drivers/net/ethernet/8390/pcnet_cs.c info->base = ioremap(link->resource[3]->start, start 252 drivers/net/ethernet/8390/xsurf100.c DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE, start 267 drivers/net/ethernet/8390/xsurf100.c if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) { start 272 drivers/net/ethernet/8390/xsurf100.c if (!request_mem_region(zdev->resource.start + start 291 drivers/net/ethernet/8390/xsurf100.c ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100); start 302 drivers/net/ethernet/8390/xsurf100.c ax88796_data.data_area = ioremap(zdev->resource.start + start 341 drivers/net/ethernet/8390/xsurf100.c release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE, start 345 drivers/net/ethernet/8390/xsurf100.c release_mem_region(zdev->resource.start, 0x100); start 358 drivers/net/ethernet/8390/xsurf100.c release_mem_region(zdev->resource.start, 0x100); start 360 drivers/net/ethernet/8390/xsurf100.c release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE, start 413 
drivers/net/ethernet/8390/zorro8390.c board = z->resource.start; start 295 drivers/net/ethernet/alacritech/slic.h unsigned int start; \ start 297 drivers/net/ethernet/alacritech/slic.h start = u64_stats_fetch_begin_irq(&(st)->syncp); \ start 299 drivers/net/ethernet/alacritech/slic.h } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \ start 1328 drivers/net/ethernet/altera/altera_tse_main.c region = devm_request_mem_region(device, (*res)->start, start 1335 drivers/net/ethernet/altera/altera_tse_main.c *ptr = devm_ioremap_nocache(device, region->start, start 1390 drivers/net/ethernet/altera/altera_tse_main.c priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; start 1395 drivers/net/ethernet/altera/altera_tse_main.c priv->rxdescmem_busaddr = dma_res->start; start 1423 drivers/net/ethernet/altera/altera_tse_main.c priv->txdescmem_busaddr = dma_res->start; start 1431 drivers/net/ethernet/altera/altera_tse_main.c priv->rxdescmem_busaddr = dma_res->start; start 1541 drivers/net/ethernet/altera/altera_tse_main.c ndev->mem_start = control_port->start; start 1586 drivers/net/ethernet/altera/altera_tse_main.c (unsigned long) control_port->start, priv->rx_irq, start 120 drivers/net/ethernet/amazon/ena/ena_ethtool.c unsigned int start; start 123 drivers/net/ethernet/amazon/ena/ena_ethtool.c start = u64_stats_fetch_begin_irq(syncp); start 125 drivers/net/ethernet/amazon/ena/ena_ethtool.c } while (u64_stats_fetch_retry_irq(syncp, start)); start 2495 drivers/net/ethernet/amazon/ena/ena_netdev.c unsigned int start; start 2508 drivers/net/ethernet/amazon/ena/ena_netdev.c start = u64_stats_fetch_begin_irq(&tx_ring->syncp); start 2511 drivers/net/ethernet/amazon/ena/ena_netdev.c } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); start 2519 drivers/net/ethernet/amazon/ena/ena_netdev.c start = u64_stats_fetch_begin_irq(&rx_ring->syncp); start 2522 drivers/net/ethernet/amazon/ena/ena_netdev.c } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); start 2529 drivers/net/ethernet/amazon/ena/ena_netdev.c start = u64_stats_fetch_begin_irq(&adapter->syncp); start 2531 drivers/net/ethernet/amazon/ena/ena_netdev.c } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); start 684 drivers/net/ethernet/amd/a2065.c unsigned long board = z->resource.start; start 694 drivers/net/ethernet/amd/am79c961a.c dev->base_addr = res->start; start 715 drivers/net/ethernet/amd/ariadne.c unsigned long board = z->resource.start; start 1114 drivers/net/ethernet/amd/au1000_eth.c if (!request_mem_region(base->start, resource_size(base), start 1121 drivers/net/ethernet/amd/au1000_eth.c if (!request_mem_region(macen->start, resource_size(macen), start 1128 drivers/net/ethernet/amd/au1000_eth.c if (!request_mem_region(macdma->start, resource_size(macdma), start 1164 drivers/net/ethernet/amd/au1000_eth.c ioremap_nocache(base->start, resource_size(base)); start 1172 drivers/net/ethernet/amd/au1000_eth.c aup->enable = (u32 *)ioremap_nocache(macen->start, start 1181 drivers/net/ethernet/amd/au1000_eth.c aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); start 1283 drivers/net/ethernet/amd/au1000_eth.c dev->base_addr = base->start; start 1302 drivers/net/ethernet/amd/au1000_eth.c (unsigned long)base->start, irq); start 1340 drivers/net/ethernet/amd/au1000_eth.c release_mem_region(macdma->start, resource_size(macdma)); start 1342 drivers/net/ethernet/amd/au1000_eth.c release_mem_region(macen->start, resource_size(macen)); start 1344 drivers/net/ethernet/amd/au1000_eth.c 
release_mem_region(base->start, resource_size(base)); start 1377 drivers/net/ethernet/amd/au1000_eth.c release_mem_region(base->start, resource_size(base)); start 1380 drivers/net/ethernet/amd/au1000_eth.c release_mem_region(base->start, resource_size(base)); start 1383 drivers/net/ethernet/amd/au1000_eth.c release_mem_region(macen->start, resource_size(macen)); start 1031 drivers/net/ethernet/amd/declance.c resource_size_t start = 0, len = 0; start 1115 drivers/net/ethernet/amd/declance.c start = to_tc_dev(bdev)->resource.start; start 1116 drivers/net/ethernet/amd/declance.c len = to_tc_dev(bdev)->resource.end - start + 1; start 1117 drivers/net/ethernet/amd/declance.c if (!request_mem_region(start, len, dev_name(bdev))) { start 1125 drivers/net/ethernet/amd/declance.c dev->mem_start = CKSEG1ADDR(start); start 1275 drivers/net/ethernet/amd/declance.c release_mem_region(start, len); start 1345 drivers/net/ethernet/amd/declance.c resource_size_t start, len; start 1348 drivers/net/ethernet/amd/declance.c start = to_tc_dev(bdev)->resource.start; start 1349 drivers/net/ethernet/amd/declance.c len = to_tc_dev(bdev)->resource.end - start + 1; start 1350 drivers/net/ethernet/amd/declance.c release_mem_region(start, len); start 130 drivers/net/ethernet/amd/hplance.c unsigned long va = (d->resource.start + DIO_VIRADDRBASE); start 630 drivers/net/ethernet/amd/nmclan_cs.c dev->base_addr = link->resource[0]->start; start 1355 drivers/net/ethernet/amd/sunlance.c if (lebuffer->resource[0].start & 7) { start 1411 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c ret = pdata->phy_if.phy_impl.start(pdata); start 830 drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c phy_impl->start = xgbe_phy_start; start 3364 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c phy_impl->start = xgbe_phy_start; start 852 drivers/net/ethernet/amd/xgbe/xgbe.h int (*start)(struct xgbe_prv_data *); start 32 drivers/net/ethernet/apm/xgene-v2/main.c pdata->resources.base_addr = devm_ioremap(dev, res->start, start 20 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len) start 22 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h u32 end = start + len - 1; start 23 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h u32 mask = GENMASK(end, start); start 26 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h *dst |= (val << start) & mask; start 29 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h static inline u32 xgene_get_bits(u32 val, u32 start, u32 end) start 31 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h return (val & GENMASK(end, start)) >> start; start 1697 drivers/net/ethernet/apm/xgene/xgene_enet_main.c pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res)); start 1708 drivers/net/ethernet/apm/xgene/xgene_enet_main.c pdata->ring_csr_addr = devm_ioremap(dev, res->start, start 1720 drivers/net/ethernet/apm/xgene/xgene_enet_main.c pdata->ring_cmd_addr = devm_ioremap(dev, res->start, start 1560 drivers/net/ethernet/apple/bmac.c bmac_proc_info(char *buffer, char **start, off_t offset, int length) start 1586 drivers/net/ethernet/apple/bmac.c *start = buffer + (offset - begin); start 1660 drivers/net/ethernet/atheros/ag71xx.c if (ar71xx_addr_ar7100[i] == res->start) start 1690 drivers/net/ethernet/atheros/ag71xx.c ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start, start 1362 drivers/net/ethernet/aurora/nb8800.c dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start); start 1408 drivers/net/ethernet/aurora/nb8800.c (unsigned long)res->start); start 1683 
drivers/net/ethernet/broadcom/b44.c unsigned int start; start 1686 drivers/net/ethernet/broadcom/b44.c start = u64_stats_fetch_begin_irq(&hwstat->syncp); start 1720 drivers/net/ethernet/broadcom/b44.c } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); start 2075 drivers/net/ethernet/broadcom/b44.c unsigned int start; start 2085 drivers/net/ethernet/broadcom/b44.c start = u64_stats_fetch_begin_irq(&hwstat->syncp); start 2090 drivers/net/ethernet/broadcom/b44.c } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); start 1728 drivers/net/ethernet/broadcom/bcm63xx_enet.c dev->irq = priv->irq = res_irq->start; start 1729 drivers/net/ethernet/broadcom/bcm63xx_enet.c priv->irq_rx = res_irq_rx->start; start 1730 drivers/net/ethernet/broadcom/bcm63xx_enet.c priv->irq_tx = res_irq_tx->start; start 433 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int start; start 439 drivers/net/ethernet/broadcom/bcmsysport.c start = u64_stats_fetch_begin_irq(&priv->syncp); start 442 drivers/net/ethernet/broadcom/bcmsysport.c } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); start 457 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int start; start 486 drivers/net/ethernet/broadcom/bcmsysport.c start = u64_stats_fetch_begin_irq(syncp); start 488 drivers/net/ethernet/broadcom/bcmsysport.c } while (u64_stats_fetch_retry_irq(syncp, start)); start 1831 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int start; start 1839 drivers/net/ethernet/broadcom/bcmsysport.c start = u64_stats_fetch_begin_irq(&priv->syncp); start 1842 drivers/net/ethernet/broadcom/bcmsysport.c } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); start 156 drivers/net/ethernet/broadcom/bgmac.c if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { start 205 drivers/net/ethernet/broadcom/bgmac.c if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8) start 248 drivers/net/ethernet/broadcom/bgmac.c while (ring->start != ring->end) { start 249 drivers/net/ethernet/broadcom/bgmac.c int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; start 280 drivers/net/ethernet/broadcom/bgmac.c ring->start++; start 424 drivers/net/ethernet/broadcom/bgmac.c while (ring->start != end_slot) { start 426 drivers/net/ethernet/broadcom/bgmac.c struct bgmac_slot_info *slot = &ring->slots[ring->start]; start 451 drivers/net/ethernet/broadcom/bgmac.c ring->start); start 459 drivers/net/ethernet/broadcom/bgmac.c ring->start); start 489 drivers/net/ethernet/broadcom/bgmac.c bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); start 491 drivers/net/ethernet/broadcom/bgmac.c if (++ring->start >= BGMAC_RX_RING_SLOTS) start 492 drivers/net/ethernet/broadcom/bgmac.c ring->start = 0; start 703 drivers/net/ethernet/broadcom/bgmac.c ring->start = 0; start 721 drivers/net/ethernet/broadcom/bgmac.c ring->start = 0; start 461 drivers/net/ethernet/broadcom/bgmac.h u32 start; start 1545 drivers/net/ethernet/broadcom/bnx2.c bnx2_5706s_force_link_dn(struct bnx2 *bp, int start) start 1551 drivers/net/ethernet/broadcom/bnx2.c if (start) start 4536 drivers/net/ethernet/broadcom/bnx2.c u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; start 4550 drivers/net/ethernet/broadcom/bnx2.c if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) start 4566 drivers/net/ethernet/broadcom/bnx2.c memcpy(align_buf, start, 4); start 5739 drivers/net/ethernet/broadcom/bnx2.c bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) start 5750 drivers/net/ethernet/broadcom/bnx2.c bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]); start 5752 
drivers/net/ethernet/broadcom/bnx2.c if (bnx2_reg_rd_ind(bp, start + offset) != start 927 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h &func_params.params.start; start 3187 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c int i, j, k, start; start 3220 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c start = 0; start 3222 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c start = 4; start 3223 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c memcpy(buf, bnx2x_tests_str_arr + start, start 511 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h u16 start; start 646 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { start 736 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h ILT_RANGE((ilt_start + ilt_cli->start), start 757 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h REG_WR(bp, start_reg, (ilt_start + ilt_cli->start)); start 772 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h for (i = ilt_cli->start; i <= ilt_cli->end; i++) start 907 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c u16 start = 0, end = 0; start 1107 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); start 1109 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (j = start; j != end; j = RX_BD(j + 1)) { start 1117 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c start = RX_SGE(fp->rx_sge_prod); start 1119 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (j = start; j != end; j = RX_SGE(j + 1)) { start 1127 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c start = RCQ_BD(fp->rx_comp_cons - 10); start 1129 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (j = start; j != end; j = RCQ_BD(j + 1)) { start 1153 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); start 1155 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (j = start; j != end; j = TX_BD(j + 1)) { start 1164 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c start = TX_BD(txdata->tx_bd_cons - 10); start 1166 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (j = start; j != end; j = TX_BD(j + 1)) { start 5327 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); start 7187 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_cli.start = 0; start 7964 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; start 7973 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; start 8631 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start = line; start 8639 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start, start 8651 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start = line; start 8661 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start, start 8674 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start = line; start 8680 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start, start 8691 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start = line; start 8697 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_client->start, start 9029 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ilt_cli.start = 0; start 14743 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) start 14749 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (start) { start 6142 
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c struct bnx2x_func_start_params *start_params = &params->params.start; start 1294 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h struct bnx2x_func_start_params start; start 1076 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); start 1080 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c vf->bars[n].bar = start + size * vf->abs_vfid; start 867 drivers/net/ethernet/broadcom/bnxt/bnxt.c u16 start, u32 agg_bufs, bool tpa) start 888 drivers/net/ethernet/broadcom/bnxt/bnxt.c agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); start 890 drivers/net/ethernet/broadcom/bnxt/bnxt.c agg = bnxt_get_agg(bp, cpr, idx, start + i); start 2551 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c u16 start = eeprom->offset, length = eeprom->len; start 2557 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c if (start < ETH_MODULE_SFF_8436_LEN) { start 2558 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) start 2559 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c length = ETH_MODULE_SFF_8436_LEN - start; start 2561 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c start, length, data); start 2564 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c start += length; start 2571 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c start -= ETH_MODULE_SFF_8436_LEN; start 2573 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c start, length, data); start 3185 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c time64_t start, s16 start_utc, u16 total_segs, start 3192 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c time64_to_tm(start, 0, &tm); start 3277 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c unsigned long start, end; start 3285 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c start = jiffies; start 3308 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c duration = jiffies_to_msecs(end - start); start 217 drivers/net/ethernet/broadcom/cnic.c static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) start 225 drivers/net/ethernet/broadcom/cnic.c if (start) start 659 drivers/net/ethernet/broadcom/cnic.c id_tbl->start = start_id; start 680 drivers/net/ethernet/broadcom/cnic.c id -= id_tbl->start; start 712 drivers/net/ethernet/broadcom/cnic.c id += id_tbl->start; start 725 drivers/net/ethernet/broadcom/cnic.c id -= id_tbl->start; start 142 drivers/net/ethernet/broadcom/cnic.h u32 start; start 439 drivers/net/ethernet/broadcom/genet/bcmmii.c res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; start 440 drivers/net/ethernet/broadcom/genet/bcmmii.c res.end = res.start + 8; start 2540 drivers/net/ethernet/broadcom/sb1250-mac.c sbm_base = ioremap_nocache(res->start, resource_size(res)); start 2555 drivers/net/ethernet/broadcom/sb1250-mac.c sbmac_orig_hwaddr ?
"" : "not ", (long long)res->start); start 2576 drivers/net/ethernet/broadcom/sb1250-mac.c err = sbmac_init(pldev, res->start); start 6297 drivers/net/ethernet/broadcom/tg3.c nsec = rq->perout.start.sec * 1000000000ULL + start 6298 drivers/net/ethernet/broadcom/tg3.c rq->perout.start.nsec; start 12096 drivers/net/ethernet/broadcom/tg3.c __be32 start = 0, end; start 12107 drivers/net/ethernet/broadcom/tg3.c ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); start 12132 drivers/net/ethernet/broadcom/tg3.c memcpy(buf, &start, 4); start 15771 drivers/net/ethernet/broadcom/tg3.c u32 val, offset, start, ver_offset; start 15776 drivers/net/ethernet/broadcom/tg3.c tg3_nvram_read(tp, 0x4, &start)) start 15799 drivers/net/ethernet/broadcom/tg3.c offset = offset + ver_offset - start; start 15894 drivers/net/ethernet/broadcom/tg3.c u32 val, offset, start; start 15911 drivers/net/ethernet/broadcom/tg3.c start = 0x08000000; start 15912 drivers/net/ethernet/broadcom/tg3.c else if (tg3_nvram_read(tp, offset - 4, &start)) start 15920 drivers/net/ethernet/broadcom/tg3.c offset += val - start; start 4232 drivers/net/ethernet/cadence/macb_main.c dev->base_addr = regs->start; start 48 drivers/net/ethernet/cadence/macb_pci.c res[0].start = pci_resource_start(pdev, 0); start 52 drivers/net/ethernet/cadence/macb_pci.c res[1].start = pci_irq_vector(pdev, 0); start 57 drivers/net/ethernet/cadence/macb_pci.c &res[0].start); start 1700 drivers/net/ethernet/calxeda/xgmac.c if (!request_mem_region(res->start, resource_size(res), pdev->name)) start 1722 drivers/net/ethernet/calxeda/xgmac.c priv->base = ioremap(res->start, resource_size(res)); start 1808 drivers/net/ethernet/calxeda/xgmac.c release_mem_region(res->start, resource_size(res)); start 1836 drivers/net/ethernet/calxeda/xgmac.c release_mem_region(res->start, resource_size(res)); start 170 drivers/net/ethernet/cavium/liquidio/octeon_device.h u64 start; start 100 drivers/net/ethernet/cavium/liquidio/octeon_main.h if (oct->mmio[baridx].start) start 121 drivers/net/ethernet/cavium/liquidio/octeon_main.h oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2); start 132 drivers/net/ethernet/cavium/liquidio/octeon_main.h ioremap(oct->mmio[baridx].start, mapped_len); start 136 drivers/net/ethernet/cavium/liquidio/octeon_main.h baridx, oct->mmio[baridx].start, mapped_len, start 1447 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c p->mix_phys = res_mix->start; start 1449 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c p->agl_phys = res_agl->start; start 1451 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c p->agl_prt_ctl_phys = res_agl_prt_ctl->start; start 536 drivers/net/ethernet/chelsio/cxgb/cxgb2.c unsigned int start, unsigned int end) start 538 drivers/net/ethernet/chelsio/cxgb/cxgb2.c u32 *p = buf + start; start 540 drivers/net/ethernet/chelsio/cxgb/cxgb2.c for ( ; start <= end; start += sizeof(u32)) start 541 drivers/net/ethernet/chelsio/cxgb/cxgb2.c *p++ = readl(ap->regs + start); start 702 drivers/net/ethernet/chelsio/cxgb3/common.h int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, start 1730 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c unsigned int start, unsigned int end) start 1732 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c u32 *p = buf + start; start 1734 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c for (; start <= end; start += sizeof(u32)) start 1735 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c *p++ = t3_read_reg(ap, start); start 1012 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sg_ent *sgp, 
unsigned char *start, start 145 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, start 154 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c if (start >= size64 || start + n > size64) start 157 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c start *= (8 << mc7->width); start 166 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start); start 186 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c start += 8; start 1098 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end) start 1100 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c while (start <= end) { start 1105 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c SF_ERASE_SECTOR | (start << 8))) != 0 || start 1108 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c start++; start 2593 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c #define mem_region(adap, start, size, reg) \ start 2594 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c t3_write_reg((adap), A_ ## reg, (start)); \ start 2595 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c start += size start 2942 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c #define ulp_region(adap, name, start, len) \ start 2943 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \ start 2945 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c (start) + (len) - 1); \ start 2946 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c start += len start 2948 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c #define ulptx_region(adap, name, start, len) \ start 2949 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \ start 2951 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c (start) + (len) - 1) start 101 drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h u32 start; /* Start wrt 0 */ start 308 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c md->base = padap->vres.ocq.start; start 448 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c if (dparams->start != 0) { start 451 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c dparams->memtype, dparams->start, start 875 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c payload->start = mem_desc.base; start 879 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c &payload->start, &payload->end); start 989 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c payload[i].start = roundup(payload[i].start, start 1015 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c bytes_read >= payload[i].start && start 1873 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c ctx_info[i].start = mem_desc.base; start 1920 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size += (region_info[i].end - region_info[i].start + 1) / start 2006 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c region_info[CTXT_EGRESS].start + 1, start 2008 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c region_info[CTXT_INGRESS].start + 1); start 2030 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c max_ctx_size = region_info[i].end - region_info[i].start + 1; start 2040 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c region_info[i].start, max_ctx_size, start 2075 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c region_info[CTXT_FLM].start + 1; start 348 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h u32 start; /* start of log in firmware memory */ start 1688 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int start, int n, const u16 *rspq, unsigned int nrspq); start 1902 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct ulptx_sgl *sgl, u64 *end, unsigned int start, start 89 
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c .start = seq_tab_start, start 449 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c unsigned int start; start 462 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c ((unsigned long long)v >> p->start) & mask); start 1071 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c .start = devlog_start, start 1091 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (dparams->start == 0) start 1108 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c dparams->start, dparams->size, (__be32 *)dinfo->log, start 1215 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c .start = mboxlog_start, start 1919 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c .start = mps_tcam_start, start 2551 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c .start = dcb_info_start, start 3037 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c .start = sge_queue_start, start 1168 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c u32 start = 1024 + adapter->pf * EEPROMPFSIZE; start 1170 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c if (aligned_offset < start || start 1171 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c aligned_offset + aligned_len > start + EEPROMPFSIZE) start 1876 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c offset = ((stag >> 8) * 32) + adap->vres.stag.start; start 3487 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c u32 start; start 3490 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); start 3491 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c start &= PCI_BASE_ADDRESS_MEM_MASK; start 3492 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); start 3496 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c start | BIR_V(1) | WINDOW_V(ilog2(sz_kb))); start 3499 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.ocq.start); start 4724 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.ddp.start = val[3]; start 4745 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.stag.start = val[0]; start 4747 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.rq.start = val[2]; start 4749 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.pbl.start = val[4]; start 4757 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.srq.start = val[0]; start 4776 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.qp.start = val[0]; start 4778 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.cq.start = val[2]; start 4780 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.ocq.start = val[4]; start 4820 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.iscsi.start = val[0]; start 4828 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.ppod_edram.start = val[0]; start 4863 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.key.start = val[0]; start 150 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c const struct cxgb4_match_field *start, *link_start = NULL; start 194 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c start = cxgb4_ipv6_fields; start 197 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c start = cxgb4_ipv4_fields; start 265 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c start, false); start 296 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c ret = fill_match_fields(adapter, &fs, cls, start, false); start 279 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h unsigned int start; start 733 drivers/net/ethernet/chelsio/cxgb4/l2t.c .start = l2t_seq_start, start 881 
drivers/net/ethernet/chelsio/cxgb4/sge.c struct ulptx_sgl *sgl, u64 *end, unsigned int start, start 890 drivers/net/ethernet/chelsio/cxgb4/sge.c len = skb_headlen(skb) - start; start 893 drivers/net/ethernet/chelsio/cxgb4/sge.c sgl->addr0 = cpu_to_be64(addr[0] + start); start 1159 drivers/net/ethernet/chelsio/cxgb4/sge.c int start = skb_transport_offset(skb); start 1162 drivers/net/ethernet/chelsio/cxgb4/sge.c TXPKT_CSUM_START_V(start) | start 1163 drivers/net/ethernet/chelsio/cxgb4/sge.c TXPKT_CSUM_LOC_V(start + skb->csum_offset); start 3583 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) start 3590 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c while (start <= end) { start 3593 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c SF_ERASE_SECTOR | (start << 8))) != 0 || start 3597 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c start, ret); start 3600 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c start++; start 5158 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int start, int n, const u16 *rspq, unsigned int nrspq) start 5177 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c cmd.startidx = cpu_to_be16(start); start 5179 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c start += nq; start 9335 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4; start 9358 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; start 198 drivers/net/ethernet/chelsio/cxgb4/t4_hw.h #define FLASH_START(start) ((start) * SF_SEC_SIZE) start 1850 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c unsigned int start, unsigned int end) start 1852 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c u32 *bp = regbuf + start - T4VF_REGMAP_START; start 1854 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c for ( ; start <= end; start += sizeof(u32)) { start 1860 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL) start 1863 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c *bp++ = t4_read_reg(adapter, start); start 2022 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c .start = mboxlog_start, start 2176 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c .start = sge_queue_start, start 2322 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c .start = sge_qstats_start, start 2420 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c .start = interfaces_start, start 902 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct ulptx_sgl *sgl, u64 *end, unsigned int start, start 911 drivers/net/ethernet/chelsio/cxgb4vf/sge.c len = skb_headlen(skb) - start; start 914 drivers/net/ethernet/chelsio/cxgb4vf/sge.c sgl->addr0 = cpu_to_be64(addr[0] + start); start 1122 drivers/net/ethernet/chelsio/cxgb4vf/sge.c int start = skb_transport_offset(skb); start 1125 drivers/net/ethernet/chelsio/cxgb4vf/sge.c TXPKT_CSUM_START_V(start) | start 1126 drivers/net/ethernet/chelsio/cxgb4vf/sge.c TXPKT_CSUM_LOC_V(start + skb->csum_offset); start 1249 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c int start, int n, const u16 *rspq, int nrspq) start 1281 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c cmd.startidx = cpu_to_be16(start); start 1286 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c start += nq; start 87 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c unsigned int start, start 93 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask); start 95 
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c if (unlikely(i >= max_ppods) && (start > nr)) start 96 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1, start 391 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c unsigned int llimit, unsigned int start, start 407 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c ((iscsi_edram_start + iscsi_edram_size) != start)) { start 410 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c iscsi_edram_start, iscsi_edram_size, start); start 416 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c start = iscsi_edram_start; start 452 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c unsigned int start = ppmax - ppmax_pool; start 455 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c bitmap_set(ppm->ppod_bmap, ppmax, end - start); start 457 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c __func__, ppmax, ppmax_pool, ppod_bmap_size, start, start 481 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c ppm->base_idx = start > llimit ? start 482 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0; start 329 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h unsigned int start, unsigned int reserve_factor, start 783 drivers/net/ethernet/cirrus/ep93xx_eth.c release_mem_region(mem->start, resource_size(mem)); start 821 drivers/net/ethernet/cirrus/ep93xx_eth.c ep->res = request_mem_region(mem->start, resource_size(mem), start 829 drivers/net/ethernet/cirrus/ep93xx_eth.c ep->base_addr = ioremap(mem->start, resource_size(mem)); start 2137 drivers/net/ethernet/cisco/enic/enic_main.c int (*start)(struct vnic_dev *, int), start 2147 drivers/net/ethernet/cisco/enic/enic_main.c err = start(vdev, arg); start 1928 drivers/net/ethernet/cortina/gemini.c unsigned int start; start 1934 drivers/net/ethernet/cortina/gemini.c start = u64_stats_fetch_begin(&port->rx_stats_syncp); start 1946 drivers/net/ethernet/cortina/gemini.c } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); start 1950 drivers/net/ethernet/cortina/gemini.c start = u64_stats_fetch_begin(&port->ir_stats_syncp); start 1960 drivers/net/ethernet/cortina/gemini.c } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); start 1964 drivers/net/ethernet/cortina/gemini.c start = u64_stats_fetch_begin(&port->tx_stats_syncp); start 1968 drivers/net/ethernet/cortina/gemini.c } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); start 2037 drivers/net/ethernet/cortina/gemini.c unsigned int start; start 2046 drivers/net/ethernet/cortina/gemini.c start = u64_stats_fetch_begin(&port->ir_stats_syncp); start 2051 drivers/net/ethernet/cortina/gemini.c } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); start 2057 drivers/net/ethernet/cortina/gemini.c start = u64_stats_fetch_begin(&port->rx_stats_syncp); start 2065 drivers/net/ethernet/cortina/gemini.c } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); start 2071 drivers/net/ethernet/cortina/gemini.c start = u64_stats_fetch_begin(&port->tx_stats_syncp); start 2080 drivers/net/ethernet/cortina/gemini.c } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); start 2510 drivers/net/ethernet/cortina/gemini.c port->irq, &dmares->start, start 2511 drivers/net/ethernet/cortina/gemini.c &gmacres->start); start 1533 drivers/net/ethernet/davicom/dm9000.c db->addr_req = request_mem_region(db->addr_res->start, iosize, start 1542 drivers/net/ethernet/davicom/dm9000.c db->io_addr = ioremap(db->addr_res->start, iosize); start 1551 
drivers/net/ethernet/davicom/dm9000.c db->data_req = request_mem_region(db->data_res->start, iosize, start 1560 drivers/net/ethernet/davicom/dm9000.c db->io_data = ioremap(db->data_res->start, iosize); start 842 drivers/net/ethernet/dnet.c bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr); start 377 drivers/net/ethernet/emulex/benet/be_ethtool.c unsigned int i, j, base = 0, start; start 389 drivers/net/ethernet/emulex/benet/be_ethtool.c start = u64_stats_fetch_begin_irq(&stats->sync); start 392 drivers/net/ethernet/emulex/benet/be_ethtool.c } while (u64_stats_fetch_retry_irq(&stats->sync, start)); start 405 drivers/net/ethernet/emulex/benet/be_ethtool.c start = u64_stats_fetch_begin_irq(&stats->sync_compl); start 407 drivers/net/ethernet/emulex/benet/be_ethtool.c } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); start 410 drivers/net/ethernet/emulex/benet/be_ethtool.c start = u64_stats_fetch_begin_irq(&stats->sync); start 417 drivers/net/ethernet/emulex/benet/be_ethtool.c } while (u64_stats_fetch_retry_irq(&stats->sync, start)); start 662 drivers/net/ethernet/emulex/benet/be_main.c unsigned int start; start 669 drivers/net/ethernet/emulex/benet/be_main.c start = u64_stats_fetch_begin_irq(&rx_stats->sync); start 672 drivers/net/ethernet/emulex/benet/be_main.c } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); start 684 drivers/net/ethernet/emulex/benet/be_main.c start = u64_stats_fetch_begin_irq(&tx_stats->sync); start 687 drivers/net/ethernet/emulex/benet/be_main.c } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); start 2140 drivers/net/ethernet/emulex/benet/be_main.c int eqd, start; start 2159 drivers/net/ethernet/emulex/benet/be_main.c start = u64_stats_fetch_begin_irq(&rxo->stats.sync); start 2161 drivers/net/ethernet/emulex/benet/be_main.c } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); start 2166 drivers/net/ethernet/emulex/benet/be_main.c start = u64_stats_fetch_begin_irq(&txo->stats.sync); start 2168 drivers/net/ethernet/emulex/benet/be_main.c } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); start 2328 drivers/net/ethernet/emulex/benet/be_main.c u8 *start; start 2331 drivers/net/ethernet/emulex/benet/be_main.c start = page_address(page_info->page) + page_info->page_offset; start 2332 drivers/net/ethernet/emulex/benet/be_main.c prefetch(start); start 2339 drivers/net/ethernet/emulex/benet/be_main.c memcpy(skb->data, start, curr_frag_len); start 2346 drivers/net/ethernet/emulex/benet/be_main.c memcpy(skb->data, start, hdr_len); start 1050 drivers/net/ethernet/ethoc.c mmio = devm_request_mem_region(&pdev->dev, res->start, start 1058 drivers/net/ethernet/ethoc.c netdev->base_addr = mmio->start; start 1063 drivers/net/ethernet/ethoc.c mem = devm_request_mem_region(&pdev->dev, res->start, start 1071 drivers/net/ethernet/ethoc.c netdev->mem_start = mem->start; start 1084 drivers/net/ethernet/ethoc.c netdev->irq = res->start; start 1778 drivers/net/ethernet/faraday/ftgmac100.c priv->res = request_mem_region(res->start, resource_size(res), start 1786 drivers/net/ethernet/faraday/ftgmac100.c priv->base = ioremap(res->start, resource_size(res)); start 1097 drivers/net/ethernet/faraday/ftmac100.c priv->res = request_mem_region(res->start, resource_size(res), start 1105 drivers/net/ethernet/faraday/ftmac100.c priv->base = ioremap(res->start, resource_size(res)); start 235 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c net_dev->mem_start = priv->mac_dev->res->start; start 663 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 
u32 start, u32 count, start 677 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_fq[i].fqid = start ? start + i : 0; start 2544 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c err = priv->mac_dev->start(mac_dev); start 47 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c (unsigned long long)mac_dev->res->start); start 394 drivers/net/ethernet/freescale/enetc/enetc_hw.h static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags) start 397 drivers/net/ethernet/freescale/enetc/enetc_hw.h (start & ENETC_TXBD_L3_START_MASK)); start 853 drivers/net/ethernet/freescale/fec_mpc52xx.c if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), start 863 drivers/net/ethernet/freescale/fec_mpc52xx.c ndev->base_addr = mem.start; start 869 drivers/net/ethernet/freescale/fec_mpc52xx.c priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec)); start 979 drivers/net/ethernet/freescale/fec_mpc52xx.c release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); start 89 drivers/net/ethernet/freescale/fec_mpc52xx_phy.c priv->regs = ioremap(res.start, resource_size(&res)); start 95 drivers/net/ethernet/freescale/fec_mpc52xx_phy.c snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); start 1728 drivers/net/ethernet/freescale/fman/fman.c fman_muram_init(fman->dts_params.muram_res.start, start 2746 drivers/net/ethernet/freescale/fman/fman.c irq = res->start; start 2755 drivers/net/ethernet/freescale/fman/fman.c fman->dts_params.err_irq = res->start; start 2765 drivers/net/ethernet/freescale/fman/fman.c phys_base_addr = res->start; start 1860 drivers/net/ethernet/freescale/fman/fman_port.c dev_res = __devm_request_region(port->dev, &res, res.start, start 1869 drivers/net/ethernet/freescale/fman/fman_port.c port->dts_params.base_addr = devm_ioremap(port->dev, res.start, start 103 drivers/net/ethernet/freescale/fman/mac.c devm_ioremap(priv->dev, mac_dev->res->start, start 472 drivers/net/ethernet/freescale/fman/mac.c mac_dev->start = start; start 492 drivers/net/ethernet/freescale/fman/mac.c mac_dev->start = start; start 512 drivers/net/ethernet/freescale/fman/mac.c mac_dev->start = start; start 695 drivers/net/ethernet/freescale/fman/mac.c res.start, res.end + 1 - res.start, start 703 drivers/net/ethernet/freescale/fman/mac.c priv->vaddr = devm_ioremap(dev, mac_dev->res->start, start 704 drivers/net/ethernet/freescale/fman/mac.c mac_dev->res->end + 1 - mac_dev->res->start); start 65 drivers/net/ethernet/freescale/fman/mac.h int (*start)(struct mac_device *mac_dev); start 129 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); start 141 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c bitbang->dir = ioremap(res.start, resource_size(&res)); start 130 drivers/net/ethernet/freescale/fs_enet/mii-fec.c snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); start 132 drivers/net/ethernet/freescale/fs_enet/mii-fec.c fec->fecp = ioremap(res.start, resource_size(&res)); start 84 drivers/net/ethernet/freescale/fsl_pq_mdio.c void (*ucc_configure)(phys_addr_t start, phys_addr_t end); start 249 drivers/net/ethernet/freescale/fsl_pq_mdio.c static void ucc_configure(phys_addr_t start, phys_addr_t end) start 271 drivers/net/ethernet/freescale/fsl_pq_mdio.c if ((start < res.start) || (end > res.end)) start 445 drivers/net/ethernet/freescale/fsl_pq_mdio.c (unsigned long long)res.start); start 493 drivers/net/ethernet/freescale/fsl_pq_mdio.c data->ucc_configure(res.start, res.end); start 3786 drivers/net/ethernet/freescale/ucc_geth.c 
ug_info->uf_info.regs = res.start; start 266 drivers/net/ethernet/freescale/xgmac_mdio.c snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); start 287 drivers/net/ethernet/fujitsu/fmvj18x_cs.c link->resource[1]->start = serial_base[i]; start 289 drivers/net/ethernet/fujitsu/fmvj18x_cs.c if (link->resource[1]->start == 0) { start 309 drivers/net/ethernet/fujitsu/fmvj18x_cs.c link->resource[0]->start = ioaddr; start 314 drivers/net/ethernet/fujitsu/fmvj18x_cs.c ((link->resource[0]->start & 0x0f0) >> 3) | 0x22; start 433 drivers/net/ethernet/fujitsu/fmvj18x_cs.c dev->base_addr = link->resource[0]->start; start 544 drivers/net/ethernet/fujitsu/fmvj18x_cs.c link->resource[2]->start = 0; link->resource[2]->end = 0; start 549 drivers/net/ethernet/fujitsu/fmvj18x_cs.c base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); start 595 drivers/net/ethernet/fujitsu/fmvj18x_cs.c link->resource[3]->start = link->resource[3]->end = 0; start 600 drivers/net/ethernet/fujitsu/fmvj18x_cs.c lp->base = ioremap(link->resource[3]->start, start 95 drivers/net/ethernet/google/gve/gve_ethtool.c unsigned int start; start 105 drivers/net/ethernet/google/gve/gve_ethtool.c start = start 110 drivers/net/ethernet/google/gve/gve_ethtool.c start)); start 117 drivers/net/ethernet/google/gve/gve_ethtool.c start = start 122 drivers/net/ethernet/google/gve/gve_ethtool.c start)); start 32 drivers/net/ethernet/google/gve/gve_main.c unsigned int start; start 38 drivers/net/ethernet/google/gve/gve_main.c start = start 43 drivers/net/ethernet/google/gve/gve_main.c start)); start 49 drivers/net/ethernet/google/gve/gve_main.c start = start 54 drivers/net/ethernet/google/gve/gve_main.c start)); start 468 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c u32 start, end, num, pos, i; start 473 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR)); start 476 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c num = CIRC_SPACE(start, end, RX_DESC_NUM); start 478 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c for (i = 0, pos = start; i < num; i++) { start 504 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c if (pos != start) start 514 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c u32 start, end, num, pos, i, len; start 517 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR)); start 520 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c num = CIRC_CNT(end, start, RX_DESC_NUM); start 526 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c for (i = 0, pos = start; i < num; i++) { start 558 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c if (pos != start) start 593 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c u32 start, end, num, pos, i; start 599 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR)); start 602 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c num = CIRC_CNT(end, start, TX_DESC_NUM); start 604 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c for (i = 0, pos = start; i < num; i++) { start 628 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c if (pos != start) start 472 drivers/net/ethernet/hisilicon/hns/hnae.h int (*start)(struct hnae_handle *handle); start 959 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c .start = hns_ae_start, start 145 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c dsaf_dev->ppe_paddr = res->start; start 1348 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = h->dev->ops->start ? 
h->dev->ops->start(h) : 0; start 1572 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = ops->start ? ops->start(h) : 0; start 352 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->start ? h->dev->ops->start(h) : 0; start 379 drivers/net/ethernet/hisilicon/hns3/hnae3.h int (*start)(struct hnae3_handle *handle); start 400 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; start 1496 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c unsigned int start; start 1516 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 1529 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 1534 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 1543 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 10143 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c .start = hclge_ae_start, start 3108 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c .start = hclgevf_ae_start, start 73 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQE_IN_RANGE(wqe, start, end) \ start 74 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c (((unsigned long)(wqe) >= (unsigned long)(start)) && \ start 74 drivers/net/ethernet/huawei/hinic/hinic_rx.c unsigned int start; start 78 drivers/net/ethernet/huawei/hinic/hinic_rx.c start = u64_stats_fetch_begin(&rxq_stats->syncp); start 85 drivers/net/ethernet/huawei/hinic/hinic_rx.c } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); start 98 drivers/net/ethernet/huawei/hinic/hinic_tx.c unsigned int start; start 102 drivers/net/ethernet/huawei/hinic/hinic_tx.c start = u64_stats_fetch_begin(&txq_stats->syncp); start 109 drivers/net/ethernet/huawei/hinic/hinic_tx.c } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); start 218 drivers/net/ethernet/huawei/hinic/hinic_tx.c int start = exthdr - skb->data; start 221 drivers/net/ethernet/huawei/hinic/hinic_tx.c ipv6_skip_exthdr(skb, start, l4_proto, &frag_off); start 126 drivers/net/ethernet/i825xx/ether1.c ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) start 131 drivers/net/ethernet/i825xx/ether1.c offset = start & 4095; start 132 drivers/net/ethernet/i825xx/ether1.c page = start >> 12; start 189 drivers/net/ethernet/i825xx/ether1.c ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) start 194 drivers/net/ethernet/i825xx/ether1.c offset = start & 4095; start 195 drivers/net/ethernet/i825xx/ether1.c page = start >> 12; start 614 drivers/net/ethernet/i825xx/ether1.c int start, tail; start 622 drivers/net/ethernet/i825xx/ether1.c start = TX_AREA_START; start 623 drivers/net/ethernet/i825xx/ether1.c if (start + size > tail) start 625 drivers/net/ethernet/i825xx/ether1.c priv(dev)->tx_head = start + size; start 629 drivers/net/ethernet/i825xx/ether1.c start = priv(dev)->tx_head; start 633 drivers/net/ethernet/i825xx/ether1.c return start; start 163 drivers/net/ethernet/i825xx/lasi_82596.c __FILE__, (unsigned long)dev->hpa.start); start 168 drivers/net/ethernet/i825xx/lasi_82596.c (unsigned long)dev->hpa.start, dev->irq); start 176 drivers/net/ethernet/i825xx/lasi_82596.c netdevice->base_addr = dev->hpa.start; start 94 drivers/net/ethernet/i825xx/sni_82596.c mpu_addr = ioremap_nocache(res->start, 4); start 97 
drivers/net/ethernet/i825xx/sni_82596.c ca_addr = ioremap_nocache(ca->start, 4); start 101 drivers/net/ethernet/i825xx/sni_82596.c printk(KERN_INFO "Found i82596 at 0x%x\n", res->start); start 110 drivers/net/ethernet/i825xx/sni_82596.c netdevice->base_addr = res->start; start 113 drivers/net/ethernet/i825xx/sni_82596.c eth_addr = ioremap_nocache(idprom->start, 0x10); start 237 drivers/net/ethernet/ibm/emac/rgmii.c dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start, start 108 drivers/net/ethernet/ibm/emac/tah.c dev->base = (struct tah_regs __iomem *)ioremap(regs.start, start 253 drivers/net/ethernet/ibm/emac/zmii.c dev->base = (struct zmii_regs __iomem *)ioremap(regs.start, start 789 drivers/net/ethernet/intel/e100.c static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) start 797 drivers/net/ethernet/intel/e100.c if (start + count >= nic->eeprom_wc) start 800 drivers/net/ethernet/intel/e100.c for (addr = start; addr < start + count; addr++) start 1466 drivers/net/ethernet/intel/e1000/e1000_main.c static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, start 1470 drivers/net/ethernet/intel/e1000/e1000_main.c unsigned long begin = (unsigned long)start; start 100 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c .start = fm10k_dbg_desc_seq_start, start 107 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c .start = fm10k_dbg_desc_seq_start, start 1333 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c unsigned int start, i; start 1345 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 1348 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 1361 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 1364 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 170 drivers/net/ethernet/intel/i40e/i40e_ethtool.c unsigned int start; start 179 drivers/net/ethernet/intel/i40e/i40e_ethtool.c start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); start 184 drivers/net/ethernet/intel/i40e/i40e_ethtool.c } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); start 415 drivers/net/ethernet/intel/i40e/i40e_main.c unsigned int start; start 418 drivers/net/ethernet/intel/i40e/i40e_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 421 drivers/net/ethernet/intel/i40e/i40e_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 453 drivers/net/ethernet/intel/i40e/i40e_main.c unsigned int start; start 467 drivers/net/ethernet/intel/i40e/i40e_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 470 drivers/net/ethernet/intel/i40e/i40e_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 781 drivers/net/ethernet/intel/i40e/i40e_main.c unsigned int start; start 811 drivers/net/ethernet/intel/i40e/i40e_main.c start = u64_stats_fetch_begin_irq(&p->syncp); start 814 drivers/net/ethernet/intel/i40e/i40e_main.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 825 drivers/net/ethernet/intel/i40e/i40e_main.c start = u64_stats_fetch_begin_irq(&p->syncp); start 828 drivers/net/ethernet/intel/i40e/i40e_main.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 163 drivers/net/ethernet/intel/iavf/iavf_ethtool.c unsigned int start; start 172 drivers/net/ethernet/intel/iavf/iavf_ethtool.c start = !ring ? 
0 : u64_stats_fetch_begin_irq(&ring->syncp); start 175 drivers/net/ethernet/intel/iavf/iavf_ethtool.c } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); start 2827 drivers/net/ethernet/intel/ice/ice_lib.c int start = 0, end = 0; start 2837 drivers/net/ethernet/intel/ice/ice_lib.c start = end; start 2838 drivers/net/ethernet/intel/ice/ice_lib.c if ((start + needed) > res->end) start 2842 drivers/net/ethernet/intel/ice/ice_lib.c if (end == (start + needed)) { start 2843 drivers/net/ethernet/intel/ice/ice_lib.c int i = start; start 2849 drivers/net/ethernet/intel/ice/ice_lib.c return start; start 3598 drivers/net/ethernet/intel/ice/ice_main.c unsigned int start; start 3605 drivers/net/ethernet/intel/ice/ice_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 3608 drivers/net/ethernet/intel/ice/ice_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 566 drivers/net/ethernet/intel/igb/igb.h struct timespec64 start; start 2296 drivers/net/ethernet/intel/igb/igb_ethtool.c unsigned int start; start 2319 drivers/net/ethernet/intel/igb/igb_ethtool.c start = u64_stats_fetch_begin_irq(&ring->tx_syncp); start 2323 drivers/net/ethernet/intel/igb/igb_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); start 2325 drivers/net/ethernet/intel/igb/igb_ethtool.c start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); start 2327 drivers/net/ethernet/intel/igb/igb_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); start 2335 drivers/net/ethernet/intel/igb/igb_ethtool.c start = u64_stats_fetch_begin_irq(&ring->rx_syncp); start 2341 drivers/net/ethernet/intel/igb/igb_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); start 6270 drivers/net/ethernet/intel/igb/igb_main.c unsigned int start; start 6297 drivers/net/ethernet/intel/igb/igb_main.c start = u64_stats_fetch_begin_irq(&ring->rx_syncp); start 6300 drivers/net/ethernet/intel/igb/igb_main.c } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); start 6313 drivers/net/ethernet/intel/igb/igb_main.c start = u64_stats_fetch_begin_irq(&ring->tx_syncp); start 6316 drivers/net/ethernet/intel/igb/igb_main.c } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); start 6470 drivers/net/ethernet/intel/igb/igb_main.c ts = timespec64_add(adapter->perout[0].start, start 6478 drivers/net/ethernet/intel/igb/igb_main.c adapter->perout[0].start = ts; start 6485 drivers/net/ethernet/intel/igb/igb_main.c ts = timespec64_add(adapter->perout[1].start, start 6492 drivers/net/ethernet/intel/igb/igb_main.c adapter->perout[1].start = ts; start 624 drivers/net/ethernet/intel/igb/igb_ptp.c igb->perout[i].start.tv_sec = rq->perout.start.sec; start 625 drivers/net/ethernet/intel/igb/igb_ptp.c igb->perout[i].start.tv_nsec = rq->perout.start.nsec; start 628 drivers/net/ethernet/intel/igb/igb_ptp.c wr32(trgttimh, rq->perout.start.sec); start 629 drivers/net/ethernet/intel/igb/igb_ptp.c wr32(trgttiml, rq->perout.start.nsec); start 728 drivers/net/ethernet/intel/igc/igc_ethtool.c unsigned int start; start 751 drivers/net/ethernet/intel/igc/igc_ethtool.c start = u64_stats_fetch_begin_irq(&ring->tx_syncp); start 755 drivers/net/ethernet/intel/igc/igc_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); start 757 drivers/net/ethernet/intel/igc/igc_ethtool.c start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); start 759 drivers/net/ethernet/intel/igc/igc_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); start 767 
drivers/net/ethernet/intel/igc/igc_ethtool.c start = u64_stats_fetch_begin_irq(&ring->rx_syncp); start 773 drivers/net/ethernet/intel/igc/igc_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); start 1909 drivers/net/ethernet/intel/igc/igc_main.c unsigned int start; start 1938 drivers/net/ethernet/intel/igc/igc_main.c start = u64_stats_fetch_begin_irq(&ring->rx_syncp); start 1941 drivers/net/ethernet/intel/igc/igc_main.c } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); start 1955 drivers/net/ethernet/intel/igc/igc_main.c start = u64_stats_fetch_begin_irq(&ring->tx_syncp); start 1958 drivers/net/ethernet/intel/igc/igc_main.c } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); start 1198 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c unsigned int start; start 1233 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 1236 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 1249 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 1252 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 8920 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int start; start 8924 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 8927 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 8943 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int start; start 8947 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 8950 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 930 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c u16 length, bufsz, i, start; start 950 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c start = 0; start 953 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c start = 1; start 962 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c if (buffer && ((u32)start + (u32)length > buffer_size)) start 965 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c for (i = start; length; i++, length--) { start 427 drivers/net/ethernet/intel/ixgbevf/ethtool.c unsigned int start; start 463 drivers/net/ethernet/intel/ixgbevf/ethtool.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 466 drivers/net/ethernet/intel/ixgbevf/ethtool.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 480 drivers/net/ethernet/intel/ixgbevf/ethtool.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 483 drivers/net/ethernet/intel/ixgbevf/ethtool.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 497 drivers/net/ethernet/intel/ixgbevf/ethtool.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 500 drivers/net/ethernet/intel/ixgbevf/ethtool.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 4361 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int start; start 4365 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 4368 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 4378 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int start; start 4391 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c start = u64_stats_fetch_begin_irq(&ring->syncp); start 4394 
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); start 1045 drivers/net/ethernet/korina.c dev->base_addr = r->start; start 1046 drivers/net/ethernet/korina.c lp->eth_regs = ioremap_nocache(r->start, resource_size(r)); start 1054 drivers/net/ethernet/korina.c lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r)); start 1062 drivers/net/ethernet/korina.c lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r)); start 644 drivers/net/ethernet/lantiq_etop.c res = devm_request_mem_region(&pdev->dev, res->start, start 653 drivers/net/ethernet/lantiq_etop.c res->start, resource_size(res)); start 2853 drivers/net/ethernet/marvell/mv643xx_eth.c msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 2979 drivers/net/ethernet/marvell/mv643xx_eth.c int start; start 2985 drivers/net/ethernet/marvell/mv643xx_eth.c start = phy_addr_get(mp) & 0x1f; start 2988 drivers/net/ethernet/marvell/mv643xx_eth.c start = phy_addr & 0x1f; start 2995 drivers/net/ethernet/marvell/mv643xx_eth.c int addr = (start + i) & 0x1f; start 3183 drivers/net/ethernet/marvell/mv643xx_eth.c dev->irq = res->start; start 314 drivers/net/ethernet/marvell/mvmdio.c dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 704 drivers/net/ethernet/marvell/mvneta.c unsigned int start; start 718 drivers/net/ethernet/marvell/mvneta.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 725 drivers/net/ethernet/marvell/mvneta.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 3937 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c unsigned int start; start 3949 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 3954 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 389 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, start 394 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c if (start > end) start 395 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c swap(start, end); start 400 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c for (tid = start; tid <= end; tid++) { start 99 drivers/net/ethernet/marvell/octeontx2/af/rvu.c int start; start 104 drivers/net/ethernet/marvell/octeontx2/af/rvu.c start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); start 105 drivers/net/ethernet/marvell/octeontx2/af/rvu.c if (start >= rsrc->max) start 108 drivers/net/ethernet/marvell/octeontx2/af/rvu.c bitmap_set(rsrc->bmap, start, nrsrc); start 109 drivers/net/ethernet/marvell/octeontx2/af/rvu.c return start; start 112 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) start 116 drivers/net/ethernet/marvell/octeontx2/af/rvu.c if (start >= rsrc->max) start 119 drivers/net/ethernet/marvell/octeontx2/af/rvu.c bitmap_clear(rsrc->bmap, start, nrsrc); start 124 drivers/net/ethernet/marvell/octeontx2/af/rvu.c int start; start 129 drivers/net/ethernet/marvell/octeontx2/af/rvu.c start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); start 130 drivers/net/ethernet/marvell/octeontx2/af/rvu.c if (start >= rsrc->max) start 348 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start); start 337 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, 
bool start) start 350 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start); start 1342 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, start 1351 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index = find_next_zero_bit(map, size, start); start 1363 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = next + 1; start 1373 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) start 1379 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c if (start >= end) start 1382 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index = find_next_zero_bit(map, end, start); start 1389 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = next + 1; start 1400 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c u16 *start, u16 *end, bool *reverse) start 1418 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c *start = req->ref_entry + 1; start 1427 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c *start = mcam->hprio_end; start 1443 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c *start = 0; start 1453 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c *start = mcam->hprio_end; start 1463 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c u16 start, end, index; start 1496 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c &start, &end, &reverse); start 1512 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = mcam->hprio_end; start 1518 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = mcam->hprio_end / 2; start 1526 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = 0; start 1533 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = mcam->bmap_entries - start; start 1535 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index = start; start 1536 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = end; start 1546 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index = npc_mcam_find_zero_area(bmap, end, start, start 1558 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c next_start = start; start 1564 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c next_start = start + (index - start) + 1; start 1578 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c ((end - start) != mcam->bmap_entries)) { start 1580 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = 0; start 1590 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c (start != (req->ref_entry + 1))) { start 1591 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = req->ref_entry + 1; start 1596 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c ((end - start) != req->ref_entry)) { start 1597 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c start = 0; start 20 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c u64 start; start 66 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c if (reg >= map->range[idx].start && start 440 drivers/net/ethernet/marvell/pxa168_eth.c struct addr_table_entry *entry, *start; start 466 drivers/net/ethernet/marvell/pxa168_eth.c start = pep->htpr; start 467 drivers/net/ethernet/marvell/pxa168_eth.c entry = start + hash_function(mac_addr); start 479 drivers/net/ethernet/marvell/pxa168_eth.c if (entry == start + 0x7ff) start 480 drivers/net/ethernet/marvell/pxa168_eth.c entry = start; start 1436 drivers/net/ethernet/marvell/pxa168_eth.c dev->irq = res->start; start 915 drivers/net/ethernet/marvell/skge.c ring->start = 
kcalloc(ring->count, sizeof(*e), GFP_KERNEL); start 916 drivers/net/ethernet/marvell/skge.c if (!ring->start) start 919 drivers/net/ethernet/marvell/skge.c for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { start 922 drivers/net/ethernet/marvell/skge.c e->next = ring->start; start 929 drivers/net/ethernet/marvell/skge.c ring->to_use = ring->to_clean = ring->start; start 987 drivers/net/ethernet/marvell/skge.c e = ring->start; start 999 drivers/net/ethernet/marvell/skge.c } while ((e = e->next) != ring->start); start 1012 drivers/net/ethernet/marvell/skge.c e = ring->start; start 1026 drivers/net/ethernet/marvell/skge.c } while ((e = e->next) != ring->start); start 1028 drivers/net/ethernet/marvell/skge.c ring->to_clean = ring->start; start 2478 drivers/net/ethernet/marvell/skge.c static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) start 2482 drivers/net/ethernet/marvell/skge.c start /= 8; start 2484 drivers/net/ethernet/marvell/skge.c end = start + len - 1; start 2487 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_START), start); start 2488 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_WP), start); start 2489 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_RP), start); start 2495 drivers/net/ethernet/marvell/skge.c start + (2*len)/3); start 2497 drivers/net/ethernet/marvell/skge.c start + (len/3)); start 2622 drivers/net/ethernet/marvell/skge.c kfree(skge->tx_ring.start); start 2625 drivers/net/ethernet/marvell/skge.c kfree(skge->rx_ring.start); start 2715 drivers/net/ethernet/marvell/skge.c kfree(skge->rx_ring.start); start 2716 drivers/net/ethernet/marvell/skge.c kfree(skge->tx_ring.start); start 2818 drivers/net/ethernet/marvell/skge.c e - skge->tx_ring.start, skb->len); start 3055 drivers/net/ethernet/marvell/skge.c e - skge->rx_ring.start, status, len); start 3122 drivers/net/ethernet/marvell/skge.c e - skge->rx_ring.start, control, status); start 3166 drivers/net/ethernet/marvell/skge.c e - skge->tx_ring.start); start 2405 drivers/net/ethernet/marvell/skge.h struct skge_element *start; start 1036 drivers/net/ethernet/marvell/sky2.c static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) start 1041 drivers/net/ethernet/marvell/sky2.c start *= 1024/8; start 1043 drivers/net/ethernet/marvell/sky2.c end = start + space - 1; start 1046 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_START), start); start 1048 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_WP), start); start 1049 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_RP), start); start 1451 drivers/net/ethernet/marvell/sky2.c unsigned char *start; start 1458 drivers/net/ethernet/marvell/sky2.c start = PTR_ALIGN(skb->data, 8); start 1459 drivers/net/ethernet/marvell/sky2.c skb_reserve(skb, start - skb->data); start 3892 drivers/net/ethernet/marvell/sky2.c unsigned int start; start 3896 drivers/net/ethernet/marvell/sky2.c start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); start 3899 drivers/net/ethernet/marvell/sky2.c } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); start 3905 drivers/net/ethernet/marvell/sky2.c start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); start 3908 drivers/net/ethernet/marvell/sky2.c } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); start 4265 drivers/net/ethernet/marvell/sky2.c unsigned long start = jiffies; start 4269 drivers/net/ethernet/marvell/sky2.c if (time_after(jiffies, 
start + HZ/4)) { start 717 drivers/net/ethernet/mediatek/mtk_eth_soc.c unsigned int start; start 727 drivers/net/ethernet/mediatek/mtk_eth_soc.c start = u64_stats_fetch_begin_irq(&hw_stats->syncp); start 739 drivers/net/ethernet/mediatek/mtk_eth_soc.c } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); start 2667 drivers/net/ethernet/mediatek/mtk_eth_soc.c unsigned int start; start 2684 drivers/net/ethernet/mediatek/mtk_eth_soc.c start = u64_stats_fetch_begin_irq(&hwstats->syncp); start 2688 drivers/net/ethernet/mediatek/mtk_eth_soc.c } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); start 80 drivers/net/ethernet/mellanox/mlx4/alloc.c u32 start, u32 nbits, start 86 drivers/net/ethernet/mellanox/mlx4/alloc.c start = ALIGN(start, align); start 88 drivers/net/ethernet/mellanox/mlx4/alloc.c while ((start < nbits) && (test_bit(start, bitmap) || start 89 drivers/net/ethernet/mellanox/mlx4/alloc.c (start & skip_mask))) start 90 drivers/net/ethernet/mellanox/mlx4/alloc.c start += align; start 92 drivers/net/ethernet/mellanox/mlx4/alloc.c if (start >= nbits) start 95 drivers/net/ethernet/mellanox/mlx4/alloc.c end = start+len; start 99 drivers/net/ethernet/mellanox/mlx4/alloc.c for (i = start + 1; i < end; i++) { start 101 drivers/net/ethernet/mellanox/mlx4/alloc.c start = i + 1; start 106 drivers/net/ethernet/mellanox/mlx4/alloc.c return start; start 135 drivers/net/ethernet/mellanox/mlx4/en_port.c static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) start 137 drivers/net/ethernet/mellanox/mlx4/en_port.c __be64 *curr = start; start 140 drivers/net/ethernet/mellanox/mlx4/en_port.c int offset = next - start; start 383 drivers/net/ethernet/mellanox/mlx4/icm.c u32 start, u32 end) start 389 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = start; i <= end; i += inc) { start 398 drivers/net/ethernet/mellanox/mlx4/icm.c while (i > start) { start 407 drivers/net/ethernet/mellanox/mlx4/icm.c u32 start, u32 end) start 411 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) start 87 drivers/net/ethernet/mellanox/mlx4/icm.h u32 start, u32 end); start 89 drivers/net/ethernet/mellanox/mlx4/icm.h u32 start, u32 end); start 306 drivers/net/ethernet/mellanox/mlx4/mlx4.h __be64 start; start 599 drivers/net/ethernet/mellanox/mlx4/mr.c mpt_entry->start = cpu_to_be64(iova); start 649 drivers/net/ethernet/mellanox/mlx4/mr.c mpt_entry->start = cpu_to_be64(mr->iova); start 1029 drivers/net/ethernet/mellanox/mlx4/mr.c fmr->mpt->start = cpu_to_be64(iova); start 1135 drivers/net/ethernet/mellanox/mlx4/mr.c fmr->mpt->start = 0; start 77 drivers/net/ethernet/mellanox/mlx4/profile.c u64 start; start 158 drivers/net/ethernet/mellanox/mlx4/profile.c profile[i].start = total_size; start 174 drivers/net/ethernet/mellanox/mlx4/profile.c (unsigned long long) profile[i].start, start 185 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->qpc_base = profile[i].start; start 194 drivers/net/ethernet/mellanox/mlx4/profile.c priv->qp_table.rdmarc_base = (u32) profile[i].start; start 195 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->rdmarc_base = profile[i].start; start 199 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->altc_base = profile[i].start; start 202 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->auxc_base = profile[i].start; start 206 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->srqc_base = profile[i].start; start 211 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->cqc_base = 
profile[i].start; start 217 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->eqc_base = profile[i].start; start 224 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->eqc_base = profile[i].start; start 230 drivers/net/ethernet/mellanox/mlx4/profile.c priv->mr_table.mpt_base = profile[i].start; start 231 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->dmpt_base = profile[i].start; start 235 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->cmpt_base = profile[i].start; start 239 drivers/net/ethernet/mellanox/mlx4/profile.c priv->mr_table.mtt_base = profile[i].start; start 240 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->mtt_base = profile[i].start; start 243 drivers/net/ethernet/mellanox/mlx4/profile.c init_hca->mc_base = profile[i].start; start 2743 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, start 2749 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (start < res_start || start + size > res_start + res_size) start 3158 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, start 3169 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (!check_mtt_range(dev, slave, start, len, mtt)) { start 3273 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c int start = be64_to_cpu(page_list[0]); start 3277 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = get_containing_mtt(dev, slave, start, npages, &rmtt); start 28 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) start 333 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE); start 137 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) start 139 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; start 355 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs); start 358 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c memcpy(eseg->inline_hdr.start, skb->data, ihs); start 663 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c memcpy(eseg->inline_hdr.start, skb->data, ihs); start 744 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c struct list_head *start, start 755 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c struct fs_node *iter = list_entry(start, struct fs_node, list); start 116 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c tstart = clock->pps_info.start[i]; start 117 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c clock->pps_info.start[i] = 0; start 328 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ts.tv_sec = rq->perout.start.sec; start 329 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ts.tv_nsec = rq->perout.start.nsec; start 503 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c clock->pps_info.start[pin] = cycles_now + cycles_delta; start 44 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c dev->roce.reserved_gids.start = tblsz; start 51 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c dev->roce.reserved_gids.start = 0; start 62 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c if (dev->roce.reserved_gids.start < count) { start 72 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c dev->roce.reserved_gids.start -= count; start 76 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c 
dev->roce.reserved_gids.start); start 86 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c dev->roce.reserved_gids.start += count; start 90 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c dev->roce.reserved_gids.start); start 95 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c int end = dev->roce.reserved_gids.start + start 100 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c dev->roce.reserved_gids.start, end, start 10 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c #define MLX5_EXTRACT(src, start, len) \ start 11 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c (((len) == 32) ? (src) : MLX5_EXTRACT_C(src, start, len)) start 16 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c #define MLX5_MERGE_C(rsrc1, rsrc2, start, len) \ start 17 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c ((((rsrc2) << (start)) & (MLX5_MASK32((start), (len)))) | \ start 18 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c ((rsrc1) & (~MLX5_MASK32((start), (len))))) start 19 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c #define MLX5_MERGE(rsrc1, rsrc2, start, len) \ start 20 drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c (((len) == 32) ? (rsrc2) : MLX5_MERGE_C(rsrc1, rsrc2, start, len)) start 223 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c u8 start; start 231 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47, start 234 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15, start 237 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47, start 240 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47, start 243 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15, start 246 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5, start 249 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56, start 253 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15, start 257 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31, start 261 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15, start 265 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15, start 269 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15, start 273 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31, start 277 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63, start 281 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31, start 285 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63, start 289 
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31, start 293 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63, start 297 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31, start 301 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63, start 305 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31, start 309 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31, start 313 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63, start 317 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31, start 320 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63, start 323 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63, start 326 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31, start 329 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63, start 332 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31, start 335 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63, start 338 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31, start 341 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63, start 344 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31, start 347 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15, start 1205 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c if (!hw_action_info->end && !hw_action_info->start) start 1240 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c max_length = hw_action_info->end - hw_action_info->start + 1; start 1272 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c hw_action_info->start + offset); start 1021 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned int start; start 1027 drivers/net/ethernet/mellanox/mlxsw/spectrum.c start = u64_stats_fetch_begin_irq(&p->syncp); start 1032 drivers/net/ethernet/mellanox/mlxsw/spectrum.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 405 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c aentry->delta_info.start, start 434 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c aentry->delta_info.start, start 463 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c aentry->delta_info.start, start 499 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c aentry->delta_info.start = 
mlxsw_sp_acl_erp_delta_start(delta); start 1086 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c u16 start; start 1092 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c return delta->start; start 1103 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c u16 start = delta->start; start 1110 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)]; start 1111 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c if (start / 8 + 1 < __MASK_LEN) start 1112 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8; start 1113 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c tmp >>= start % 8; start 1121 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c u16 start = delta->start; start 1127 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c tmp <<= start % 8; start 1130 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)]; start 1132 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c if (start / 8 + 1 < __MASK_LEN) { start 1133 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)]; start 1252 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c delta->start = delta_start; start 191 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h u16 start; start 1357 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_ipip_entry *start) start 1361 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list, start 356 drivers/net/ethernet/mellanox/mlxsw/switchx2.c unsigned int start; start 362 drivers/net/ethernet/mellanox/mlxsw/switchx2.c start = u64_stats_fetch_begin_irq(&p->syncp); start 367 drivers/net/ethernet/mellanox/mlxsw/switchx2.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 1139 drivers/net/ethernet/micrel/ks8842.c if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) start 1151 drivers/net/ethernet/micrel/ks8842.c adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); start 1222 drivers/net/ethernet/micrel/ks8842.c release_mem_region(iomem->start, resource_size(iomem)); start 1237 drivers/net/ethernet/micrel/ks8842.c release_mem_region(iomem->start, resource_size(iomem)); start 6098 drivers/net/ethernet/micrel/ksz884x.c int start; start 6115 drivers/net/ethernet/micrel/ksz884x.c while (range->end > range->start) { start 6116 drivers/net/ethernet/micrel/ksz884x.c regs_len += (range->end - range->start + 3) / 4 * 4; start 6146 drivers/net/ethernet/micrel/ksz884x.c while (range->end > range->start) { start 6147 drivers/net/ethernet/micrel/ksz884x.c for (len = range->start; len < range->end; len += 4) { start 565 drivers/net/ethernet/microchip/enc28j60.c static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end) start 569 drivers/net/ethernet/microchip/enc28j60.c if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end)) start 588 drivers/net/ethernet/microchip/enc28j60.c static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) start 593 drivers/net/ethernet/microchip/enc28j60.c if (start > 0x1FFF || end > 0x1FFF || start > end) { start 596 drivers/net/ethernet/microchip/enc28j60.c __func__, start, end); start 600 drivers/net/ethernet/microchip/enc28j60.c priv->next_pk_ptr = start; start 601 drivers/net/ethernet/microchip/enc28j60.c nolock_regw_write(priv, ERXSTL, start); start 
602 drivers/net/ethernet/microchip/enc28j60.c erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end); start 607 drivers/net/ethernet/microchip/enc28j60.c static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) start 611 drivers/net/ethernet/microchip/enc28j60.c if (start > 0x1FFF || end > 0x1FFF || start > end) { start 614 drivers/net/ethernet/microchip/enc28j60.c __func__, start, end); start 618 drivers/net/ethernet/microchip/enc28j60.c nolock_regw_write(priv, ETXSTL, start); start 474 drivers/net/ethernet/microchip/lan743x_ptp.c start_sec = perout->start.sec; start 475 drivers/net/ethernet/microchip/lan743x_ptp.c start_sec += perout->start.nsec / 1000000000; start 476 drivers/net/ethernet/microchip/lan743x_ptp.c start_nsec = perout->start.nsec % 1000000000; start 484 drivers/net/ethernet/moxa/moxart_ether.c ndev->base_addr = res->start; start 225 drivers/net/ethernet/natsemi/jazzsonic.c dev->base_addr = res->start; start 1700 drivers/net/ethernet/natsemi/ns83820.c unsigned long start; start 1706 drivers/net/ethernet/natsemi/ns83820.c start = jiffies; start 1718 drivers/net/ethernet/natsemi/ns83820.c if (time_after_eq(jiffies, start + HZ)) { start 256 drivers/net/ethernet/natsemi/xtsonic.c dev->base_addr = resmem->start; start 257 drivers/net/ethernet/natsemi/xtsonic.c dev->irq = resirq->start; start 2293 drivers/net/ethernet/neterion/vxge/vxge-main.c start: start 2366 drivers/net/ethernet/neterion/vxge/vxge-main.c goto start; start 3111 drivers/net/ethernet/neterion/vxge/vxge-main.c unsigned int start; start 3115 drivers/net/ethernet/neterion/vxge/vxge-main.c start = u64_stats_fetch_begin_irq(&rxstats->syncp); start 3120 drivers/net/ethernet/neterion/vxge/vxge-main.c } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); start 3130 drivers/net/ethernet/neterion/vxge/vxge-main.c start = u64_stats_fetch_begin_irq(&txstats->syncp); start 3134 drivers/net/ethernet/neterion/vxge/vxge-main.c } while (u64_stats_fetch_retry_irq(&txstats->syncp, start)); start 107 drivers/net/ethernet/netronome/nfp/abm/qdisc.c unsigned int start, unsigned int end) start 111 drivers/net/ethernet/netronome/nfp/abm/qdisc.c for (i = start; i < end; i++) start 334 drivers/net/ethernet/netronome/nfp/bpf/main.c u8 __iomem *mem, *start; start 341 drivers/net/ethernet/netronome/nfp/bpf/main.c start = mem; start 342 drivers/net/ethernet/netronome/nfp/bpf/main.c while (mem - start + 8 <= nfp_cpp_area_size(area)) { start 351 drivers/net/ethernet/netronome/nfp/bpf/main.c if (mem - start > nfp_cpp_area_size(area)) start 396 drivers/net/ethernet/netronome/nfp/bpf/main.c if (mem - start != nfp_cpp_area_size(area)) { start 398 drivers/net/ethernet/netronome/nfp/bpf/main.c mem - start, nfp_cpp_area_size(area)); start 407 drivers/net/ethernet/netronome/nfp/bpf/main.c nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start); start 523 drivers/net/ethernet/netronome/nfp/bpf/main.c .start = nfp_bpf_start, start 951 drivers/net/ethernet/netronome/nfp/flower/main.c .start = nfp_flower_start, start 201 drivers/net/ethernet/netronome/nfp/nfp_app.c if (app->type->start) { start 202 drivers/net/ethernet/netronome/nfp/nfp_app.c err = app->type->start(app); start 123 drivers/net/ethernet/netronome/nfp/nfp_app.h int (*start)(struct nfp_app *app); start 3377 drivers/net/ethernet/netronome/nfp/nfp_net_common.c unsigned int start; start 3380 drivers/net/ethernet/netronome/nfp/nfp_net_common.c start = u64_stats_fetch_begin(&r_vec->rx_sync); start 3384 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 
} while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); start 3390 drivers/net/ethernet/netronome/nfp/nfp_net_common.c start = u64_stats_fetch_begin(&r_vec->tx_sync); start 3394 drivers/net/ethernet/netronome/nfp/nfp_net_common.c } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); start 469 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c unsigned int start; start 472 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); start 480 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); start 483 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); start 493 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); start 132 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c unsigned int start; start 136 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c start = u64_stats_fetch_begin_irq(&repr_stats->syncp); start 142 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start)); start 852 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c priv->resource.start = priv->phys; start 853 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c priv->resource.end = priv->resource.start + priv->size - 1; start 34 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c u64 start; start 118 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (tmp->cpp_id == res->cpp_id && tmp->start > res->start) start 188 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c res->start, res->end, start 314 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area->resource.start = address; start 315 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area->resource.end = area->resource.start + size - 1; start 88 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c u8 start[0]; start 141 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c if (crc != get_unaligned_le32(db->start + size)) { start 143 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c crc, get_unaligned_le32(db->start + size)); start 33 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c static int target_rw(u32 cpp_id, int pp, int start, int len) start 5394 drivers/net/ethernet/nvidia/forcedeth.c unsigned long start; start 5399 drivers/net/ethernet/nvidia/forcedeth.c start = jiffies; start 5400 drivers/net/ethernet/nvidia/forcedeth.c while (time_before(jiffies, start + 5*HZ)) { start 1284 drivers/net/ethernet/nxp/lpc_eth.c pldat->net_base = ioremap(res->start, resource_size(res)); start 1389 drivers/net/ethernet/nxp/lpc_eth.c (unsigned long)res->start, ndev->irq); start 1381 drivers/net/ethernet/packetengines/hamachi.c if (dev->start == 0 && --stopit < 0) { start 514 drivers/net/ethernet/pasemi/pasemi_mac.c int start, limit; start 516 drivers/net/ethernet/pasemi/pasemi_mac.c start = txring->next_to_clean; start 520 drivers/net/ethernet/pasemi/pasemi_mac.c if (start > limit) start 523 drivers/net/ethernet/pasemi/pasemi_mac.c for (i = start; i < limit; i += freed) { start 819 drivers/net/ethernet/pasemi/pasemi_mac.c unsigned int start, descr_count, buf_count, batch_limit; start 833 drivers/net/ethernet/pasemi/pasemi_mac.c start = txring->next_to_clean; start 836 drivers/net/ethernet/pasemi/pasemi_mac.c prefetch(&TX_DESC_INFO(txring, start+1).skb); start 839 drivers/net/ethernet/pasemi/pasemi_mac.c if (start > ring_limit) 
start 845 drivers/net/ethernet/pasemi/pasemi_mac.c for (i = start; start 295 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start); start 509 drivers/net/ethernet/pensando/ionic/ionic_txrx.c bool start, bool done) start 516 drivers/net/ethernet/pensando/ionic/ionic_txrx.c flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; start 566 drivers/net/ethernet/pensando/ionic/ionic_txrx.c bool start, done; start 606 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start = true; start 628 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start, done); start 630 drivers/net/ethernet/pensando/ionic/ionic_txrx.c total_bytes += start ? len : len + hdrlen; start 632 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start = false; start 666 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start, done); start 668 drivers/net/ethernet/pensando/ionic/ionic_txrx.c total_bytes += start ? len : len + hdrlen; start 670 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start = false; start 689 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start, done); start 691 drivers/net/ethernet/pensando/ionic/ionic_txrx.c total_bytes += start ? len : len + hdrlen; start 693 drivers/net/ethernet/pensando/ionic/ionic_txrx.c start = false; start 1334 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c u64 addr, u32 *start) start 1337 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); start 1341 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); start 1350 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c u64 addr, u32 *start) start 1361 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); start 1372 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c u32 start; start 1376 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c ret = adapter->pci_set_window(adapter, off, &start); start 1381 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c addr = adapter->ahw.pci_base0 + start; start 1383 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c addr = pci_base_offset(adapter, start); start 1388 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c (start & PAGE_MASK); start 1395 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c addr = mem_ptr + (start & (PAGE_SIZE-1)); start 1040 drivers/net/ethernet/qlogic/qed/qed_fcoe.c .start = &qed_fcoe_start, start 1435 drivers/net/ethernet/qlogic/qed/qed_iscsi.c .start = &qed_iscsi_start, start 2323 drivers/net/ethernet/qlogic/qed/qed_l2.c struct qed_sp_vport_start_params start = { 0 }; start 2326 drivers/net/ethernet/qlogic/qed/qed_l2.c start.tpa_mode = params->gro_enable ? 
QED_TPA_MODE_GRO : start 2328 drivers/net/ethernet/qlogic/qed/qed_l2.c start.remove_inner_vlan = params->remove_inner_vlan; start 2329 drivers/net/ethernet/qlogic/qed/qed_l2.c start.only_untagged = true; /* untagged only */ start 2330 drivers/net/ethernet/qlogic/qed/qed_l2.c start.drop_ttl0 = params->drop_ttl0; start 2331 drivers/net/ethernet/qlogic/qed/qed_l2.c start.opaque_fid = p_hwfn->hw_info.opaque_fid; start 2332 drivers/net/ethernet/qlogic/qed/qed_l2.c start.concrete_fid = p_hwfn->hw_info.concrete_fid; start 2333 drivers/net/ethernet/qlogic/qed/qed_l2.c start.handle_ptp_pkts = params->handle_ptp_pkts; start 2334 drivers/net/ethernet/qlogic/qed/qed_l2.c start.vport_id = params->vport_id; start 2335 drivers/net/ethernet/qlogic/qed/qed_l2.c start.max_buffers_per_cqe = 16; start 2336 drivers/net/ethernet/qlogic/qed/qed_l2.c start.mtu = params->mtu; start 2338 drivers/net/ethernet/qlogic/qed/qed_l2.c rc = qed_sp_vport_start(p_hwfn, &start); start 2352 drivers/net/ethernet/qlogic/qed/qed_l2.c start.vport_id, start.mtu); start 2640 drivers/net/ethernet/qlogic/qed/qed_ll2.c .start = &qed_ll2_start, start 1913 drivers/net/ethernet/qlogic/qed/qed_sriov.c struct vfpf_vport_start_tlv *start; start 1929 drivers/net/ethernet/qlogic/qed/qed_sriov.c start = &mbx->req_virt->start_vport; start 1935 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (!start->sb_addr[sb_id]) { start 1943 drivers/net/ethernet/qlogic/qed/qed_sriov.c start->sb_addr[sb_id], start 1947 drivers/net/ethernet/qlogic/qed/qed_sriov.c vf->mtu = start->mtu; start 1948 drivers/net/ethernet/qlogic/qed/qed_sriov.c vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; start 1956 drivers/net/ethernet/qlogic/qed/qed_sriov.c u8 vf_req = start->only_untagged; start 1962 drivers/net/ethernet/qlogic/qed/qed_sriov.c params.tpa_mode = start->tpa_mode; start 1963 drivers/net/ethernet/qlogic/qed/qed_sriov.c params.remove_inner_vlan = start->inner_vlan_removal; start 1971 drivers/net/ethernet/qlogic/qed/qed_sriov.c params.max_buffers_per_cqe = start->max_buffers_per_cqe; start 2039 drivers/net/ethernet/qlogic/qede/qede_main.c struct qed_start_vport_params start = {0}; start 2052 drivers/net/ethernet/qlogic/qede/qede_main.c start.handle_ptp_pkts = !!(edev->ptp); start 2053 drivers/net/ethernet/qlogic/qede/qede_main.c start.gro_enable = !edev->gro_disable; start 2054 drivers/net/ethernet/qlogic/qede/qede_main.c start.mtu = edev->ndev->mtu; start 2055 drivers/net/ethernet/qlogic/qede/qede_main.c start.vport_id = 0; start 2056 drivers/net/ethernet/qlogic/qede/qede_main.c start.drop_ttl0 = true; start 2057 drivers/net/ethernet/qlogic/qede/qede_main.c start.remove_inner_vlan = vlan_removal_en; start 2058 drivers/net/ethernet/qlogic/qede/qede_main.c start.clear_stats = clear_stats; start 2060 drivers/net/ethernet/qlogic/qede/qede_main.c rc = edev->ops->vport_start(cdev, &start); start 2069 drivers/net/ethernet/qlogic/qede/qede_main.c start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); start 2139 drivers/net/ethernet/qlogic/qede/qede_main.c vport_update_params->vport_id = start.vport_id; start 1253 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c u32 start; start 1260 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c start = QLCNIC_PCI_OCM0_2M + off; start 1262 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c addr = adapter->ahw->pci_base0 + start; start 402 drivers/net/ethernet/qualcomm/emac/emac-sgmii.c phy->base = ioremap(res->start, resource_size(res)); start 411 drivers/net/ethernet/qualcomm/emac/emac-sgmii.c phy->digital = 
ioremap(res->start, resource_size(res)); start 109 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c unsigned int cpu, start; start 117 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); start 122 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); start 2522 drivers/net/ethernet/realtek/8139too.c unsigned int start; start 2534 drivers/net/ethernet/realtek/8139too.c start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); start 2537 drivers/net/ethernet/realtek/8139too.c } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); start 2540 drivers/net/ethernet/realtek/8139too.c start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); start 2543 drivers/net/ethernet/realtek/8139too.c } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); start 52 drivers/net/ethernet/realtek/r8169_firmware.c size_t i, size, start; start 63 drivers/net/ethernet/realtek/r8169_firmware.c start = le32_to_cpu(fw_info->fw_start); start 64 drivers/net/ethernet/realtek/r8169_firmware.c if (start > fw->size) start 68 drivers/net/ethernet/realtek/r8169_firmware.c if (size > (fw->size - start) / FW_OPCODE_SIZE) start 73 drivers/net/ethernet/realtek/r8169_firmware.c pa->code = (__le32 *)(fw->data + start); start 5627 drivers/net/ethernet/realtek/r8169_main.c static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, start 5633 drivers/net/ethernet/realtek/r8169_main.c unsigned int entry = (start + i) % NUM_TX_DESC; start 6526 drivers/net/ethernet/realtek/r8169_main.c unsigned int start; start 6534 drivers/net/ethernet/realtek/r8169_main.c start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); start 6537 drivers/net/ethernet/realtek/r8169_main.c } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); start 6540 drivers/net/ethernet/realtek/r8169_main.c start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); start 6543 drivers/net/ethernet/realtek/r8169_main.c } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); start 2019 drivers/net/ethernet/renesas/ravb_main.c ndev->base_addr = res->start; start 232 drivers/net/ethernet/renesas/ravb_ptp.c start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec; start 3274 drivers/net/ethernet/renesas/sh_eth.c ndev->base_addr = res->start; start 3353 drivers/net/ethernet/renesas/sh_eth.c !devm_request_mem_region(&pdev->dev, rtsu->start, start 3361 drivers/net/ethernet/renesas/sh_eth.c mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, start 240 drivers/net/ethernet/rocker/rocker_ofdpa.c u16 start = OFDPA_INTERNAL_VLAN_ID_BASE; start 244 drivers/net/ethernet/rocker/rocker_ofdpa.c return (_vlan_id >= start && _vlan_id <= end); start 189 drivers/net/ethernet/rocker/rocker_tlv.h struct rocker_tlv *start = rocker_tlv_start(desc_info); start 194 drivers/net/ethernet/rocker/rocker_tlv.h return start; start 198 drivers/net/ethernet/rocker/rocker_tlv.h struct rocker_tlv *start) start 200 drivers/net/ethernet/rocker/rocker_tlv.h start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; start 204 drivers/net/ethernet/rocker/rocker_tlv.h const struct rocker_tlv *start) start 206 drivers/net/ethernet/rocker/rocker_tlv.h desc_info->tlv_size = (const char *) start - desc_info->data; start 115 drivers/net/ethernet/seeq/ether3.c ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) start 132 drivers/net/ethernet/seeq/ether3.c ether3_outw(start, REG_DMAADDR); start 136 drivers/net/ethernet/seeq/ether3.c 
ether3_outw(start, REG_DMAADDR); start 617 drivers/net/ethernet/sfc/falcon/falcon.c loff_t start, size_t len, size_t *retlen, u8 *buffer) start 626 drivers/net/ethernet/sfc/falcon/falcon.c command = falcon_spi_munge_command(spi, SPI_READ, start + pos); start 627 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, start 658 drivers/net/ethernet/sfc/falcon/falcon.c falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start) start 661 drivers/net/ethernet/sfc/falcon/falcon.c (spi->block_size - (start & (spi->block_size - 1)))); start 692 drivers/net/ethernet/sfc/falcon/falcon.c loff_t start, size_t len, size_t *retlen, const u8 *buffer) start 705 drivers/net/ethernet/sfc/falcon/falcon.c falcon_spi_write_limit(spi, start + pos)); start 706 drivers/net/ethernet/sfc/falcon/falcon.c command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos); start 707 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_cmd(efx, spi, command, start + pos, start 716 drivers/net/ethernet/sfc/falcon/falcon.c command = falcon_spi_munge_command(spi, SPI_READ, start + pos); start 717 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_cmd(efx, spi, command, start + pos, start 804 drivers/net/ethernet/sfc/falcon/falcon.c falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len) start 825 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL, start 835 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_read(efx, spi, start + pos, block_len, start 859 drivers/net/ethernet/sfc/falcon/falcon.c static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, start 870 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_read(efx, part->spi, part->offset + start, start 876 drivers/net/ethernet/sfc/falcon/falcon.c static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) start 886 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_erase(part, part->offset + start, len); start 891 drivers/net/ethernet/sfc/falcon/falcon.c static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, start 902 drivers/net/ethernet/sfc/falcon/falcon.c rc = falcon_spi_write(efx, part->spi, part->offset + start, start 197 drivers/net/ethernet/sfc/falcon/farch.c unsigned int start = buffer->index; start 210 drivers/net/ethernet/sfc/falcon/farch.c FRF_AZ_BUF_CLR_START_ID, start); start 1131 drivers/net/ethernet/sfc/falcon/net_driver.h int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, start 1133 drivers/net/ethernet/sfc/falcon/net_driver.h int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); start 1134 drivers/net/ethernet/sfc/falcon/net_driver.h int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len, start 199 drivers/net/ethernet/sfc/farch.c unsigned int start = buffer->index; start 212 drivers/net/ethernet/sfc/farch.c FRF_AZ_BUF_CLR_START_ID, start); start 2201 drivers/net/ethernet/sfc/mcdi.c int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, start 2206 drivers/net/ethernet/sfc/mcdi.c loff_t offset = start; start 2207 drivers/net/ethernet/sfc/mcdi.c loff_t end = min_t(loff_t, start + len, mtd->size); start 2221 drivers/net/ethernet/sfc/mcdi.c *retlen = offset - start; start 2225 drivers/net/ethernet/sfc/mcdi.c int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) start 2229 drivers/net/ethernet/sfc/mcdi.c loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); start 2230 drivers/net/ethernet/sfc/mcdi.c loff_t end = 
min_t(loff_t, start + len, mtd->size); start 2255 drivers/net/ethernet/sfc/mcdi.c int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, start 2260 drivers/net/ethernet/sfc/mcdi.c loff_t offset = start; start 2261 drivers/net/ethernet/sfc/mcdi.c loff_t end = min_t(loff_t, start + len, mtd->size); start 2282 drivers/net/ethernet/sfc/mcdi.c *retlen = offset - start; start 374 drivers/net/ethernet/sfc/mcdi.h int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, start 376 drivers/net/ethernet/sfc/mcdi.h int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); start 377 drivers/net/ethernet/sfc/mcdi.h int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, start 1376 drivers/net/ethernet/sfc/net_driver.h int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, start 1378 drivers/net/ethernet/sfc/net_driver.h int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); start 1379 drivers/net/ethernet/sfc/net_driver.h int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len, start 320 drivers/net/ethernet/sfc/ptp.c struct efx_buffer start; start 862 drivers/net/ethernet/sfc/ptp.c int *mc_running = ptp->start.addr; start 1034 drivers/net/ethernet/sfc/ptp.c int *start = ptp->start.addr; start 1041 drivers/net/ethernet/sfc/ptp.c ptp->start.dma_addr); start 1044 drivers/net/ethernet/sfc/ptp.c WRITE_ONCE(*start, 0); start 1051 drivers/net/ethernet/sfc/ptp.c while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { start 1061 drivers/net/ethernet/sfc/ptp.c if (READ_ONCE(*start)) start 1457 drivers/net/ethernet/sfc/ptp.c rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); start 1524 drivers/net/ethernet/sfc/ptp.c efx_nic_free_buffer(efx, &ptp->start); start 1579 drivers/net/ethernet/sfc/ptp.c efx_nic_free_buffer(efx, &efx->ptp_data->start); start 513 drivers/net/ethernet/sis/sis190.c u32 start, u32 end) start 517 drivers/net/ethernet/sis/sis190.c for (cur = start; cur < end; cur++) { start 528 drivers/net/ethernet/sis/sis190.c return cur - start; start 2058 drivers/net/ethernet/smsc/smc911x.c if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) { start 2085 drivers/net/ethernet/smsc/smc911x.c addr = ioremap(res->start, SMC911X_IO_EXTENT); start 2093 drivers/net/ethernet/smsc/smc911x.c ndev->base_addr = res->start; start 2100 drivers/net/ethernet/smsc/smc911x.c release_mem_region(res->start, SMC911X_IO_EXTENT); start 2106 drivers/net/ethernet/smsc/smc911x.c lp->physaddr = res->start; start 2136 drivers/net/ethernet/smsc/smc911x.c release_mem_region(res->start, SMC911X_IO_EXTENT); start 404 drivers/net/ethernet/smsc/smc91c92_cs.c p_dev->resource[1]->start = p_dev->resource[0]->start; start 414 drivers/net/ethernet/smsc/smc91c92_cs.c p_dev->resource[0]->start = k ^ 0x300; start 436 drivers/net/ethernet/smsc/smc91c92_cs.c dev->base_addr = link->resource[0]->start; start 440 drivers/net/ethernet/smsc/smc91c92_cs.c link->resource[2]->start = link->resource[2]->end = 0; start 445 drivers/net/ethernet/smsc/smc91c92_cs.c smc->base = ioremap(link->resource[2]->start, start 521 drivers/net/ethernet/smsc/smc91c92_cs.c unsigned int iouart = link->resource[1]->start; start 589 drivers/net/ethernet/smsc/smc91c92_cs.c dev->base_addr = link->resource[0]->start; start 629 drivers/net/ethernet/smsc/smc91c92_cs.c link->resource[1]->start = com[j]; start 640 drivers/net/ethernet/smsc/smc91c92_cs.c dev->base_addr = link->resource[0]->start + 0x10; start 657 drivers/net/ethernet/smsc/smc91c92_cs.c outb(fw->data[i], 
link->resource[0]->start + 2); start 699 drivers/net/ethernet/smsc/smc91c92_cs.c set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR); start 701 drivers/net/ethernet/smsc/smc91c92_cs.c set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR); start 703 drivers/net/ethernet/smsc/smc91c92_cs.c inw(link->resource[0]->start + OSITECH_AUI_PWR), start 704 drivers/net/ethernet/smsc/smc91c92_cs.c inw(link->resource[0]->start + OSITECH_RESET_ISR)); start 2072 drivers/net/ethernet/smsc/smc91x.c addr = ioremap(res->start, ATTRIB_SIZE); start 2128 drivers/net/ethernet/smsc/smc91x.c if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME)) start 2141 drivers/net/ethernet/smsc/smc91x.c release_mem_region(res->start, ATTRIB_SIZE); start 2153 drivers/net/ethernet/smsc/smc91x.c if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { start 2159 drivers/net/ethernet/smsc/smc91x.c lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); start 2175 drivers/net/ethernet/smsc/smc91x.c release_mem_region(res->start, SMC_DATA_EXTENT); start 2338 drivers/net/ethernet/smsc/smc91x.c if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { start 2369 drivers/net/ethernet/smsc/smc91x.c addr = ioremap(res->start, SMC_IO_EXTENT); start 2379 drivers/net/ethernet/smsc/smc91x.c lp->physaddr = res->start; start 2397 drivers/net/ethernet/smsc/smc91x.c release_mem_region(res->start, SMC_IO_EXTENT); start 2428 drivers/net/ethernet/smsc/smc91x.c release_mem_region(res->start, SMC_IO_EXTENT); start 2339 drivers/net/ethernet/smsc/smsc911x.c release_mem_region(res->start, resource_size(res)); start 2442 drivers/net/ethernet/smsc/smsc911x.c if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { start 2457 drivers/net/ethernet/smsc/smsc911x.c pdata->ioaddr = ioremap_nocache(res->start, res_size); start 2571 drivers/net/ethernet/smsc/smsc911x.c release_mem_region(res->start, resource_size(res)); start 1986 drivers/net/ethernet/socionext/netsec.c ndev->irq = irq_res->start; start 2000 drivers/net/ethernet/socionext/netsec.c priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, start 2008 drivers/net/ethernet/socionext/netsec.c priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, start 1506 drivers/net/ethernet/socionext/sni_ave.c unsigned int start; start 1509 drivers/net/ethernet/socionext/sni_ave.c start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp); start 1512 drivers/net/ethernet/socionext/sni_ave.c } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start)); start 1515 drivers/net/ethernet/socionext/sni_ave.c start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp); start 1518 drivers/net/ethernet/socionext/sni_ave.c } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start)); start 260 drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c dwmac->clk_sel_reg = res->start; start 527 drivers/net/ethernet/stmicro/stmmac/dwmac5.c writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); start 530 drivers/net/ethernet/stmicro/stmmac/dwmac5.c cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; start 531 drivers/net/ethernet/stmicro/stmmac/dwmac5.c writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); start 1106 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index)); start 1109 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; start 1110 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c writel(cfg->start.tv_nsec, ioaddr + 
XGMAC_PPSx_TARGET_TIME_NSEC(index)); start 121 drivers/net/ethernet/stmicro/stmmac/stmmac.h struct timespec64 start; start 149 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c cfg->start.tv_sec = rq->perout.start.sec; start 150 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c cfg->start.tv_nsec = rq->perout.start.nsec; start 6577 drivers/net/ethernet/sun/niu.c u64 start, stuff; start 6584 drivers/net/ethernet/sun/niu.c start = skb_checksum_start_offset(skb) - start 6586 drivers/net/ethernet/sun/niu.c stuff = start + skb->csum_offset; start 6588 drivers/net/ethernet/sun/niu.c csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; start 8071 drivers/net/ethernet/sun/niu.c static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) start 8083 drivers/net/ethernet/sun/niu.c "VPD_SCAN: start[%x] end[%x]\n", start, end); start 8084 drivers/net/ethernet/sun/niu.c while (start < end) { start 8095 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read(np, start + 2); start 8099 drivers/net/ethernet/sun/niu.c start += 3; start 8101 drivers/net/ethernet/sun/niu.c prop_len = niu_pci_eeprom_read(np, start + 4); start 8104 drivers/net/ethernet/sun/niu.c err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); start 8142 drivers/net/ethernet/sun/niu.c u32 off = start + 5 + err; start 8156 drivers/net/ethernet/sun/niu.c start += len; start 8163 drivers/net/ethernet/sun/niu.c static void niu_pci_vpd_fetch(struct niu *np, u32 start) start 8168 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read16_swp(np, start + 1); start 8174 drivers/net/ethernet/sun/niu.c while (start + offset < ESPC_EEPROM_SIZE) { start 8175 drivers/net/ethernet/sun/niu.c u32 here = start + offset; start 8186 drivers/net/ethernet/sun/niu.c here = start + offset + 3; start 8187 drivers/net/ethernet/sun/niu.c end = start + offset + err; start 8200 drivers/net/ethernet/sun/niu.c u32 start = 0, end = ESPC_EEPROM_SIZE, ret; start 8203 drivers/net/ethernet/sun/niu.c while (start < end) { start 8204 drivers/net/ethernet/sun/niu.c ret = start; start 8207 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read16(np, start + 0); start 8212 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read16(np, start + 23); start 8215 drivers/net/ethernet/sun/niu.c start += err; start 8218 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read16(np, start + 0); start 8221 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read16(np, start + 2); start 8226 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read(np, start + 20); start 8234 drivers/net/ethernet/sun/niu.c start = ret + (err * 512); start 8238 drivers/net/ethernet/sun/niu.c err = niu_pci_eeprom_read16_swp(np, start + 8); start 46 drivers/net/ethernet/sun/sunvnet_common.c static int __vnet_tx_trigger(struct vnet_port *port, u32 start); start 470 drivers/net/ethernet/sun/sunvnet_common.c u32 start, u32 end, u8 vio_dring_state) start 480 drivers/net/ethernet/sun/sunvnet_common.c .start_idx = start, start 587 drivers/net/ethernet/sun/sunvnet_common.c u32 start, u32 end, int *npkts, int budget) start 593 drivers/net/ethernet/sun/sunvnet_common.c end = (end == (u32)-1) ? 
vio_dring_prev(dr, start) start 596 drivers/net/ethernet/sun/sunvnet_common.c viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); start 598 drivers/net/ethernet/sun/sunvnet_common.c while (start != end) { start 599 drivers/net/ethernet/sun/sunvnet_common.c int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); start 607 drivers/net/ethernet/sun/sunvnet_common.c ack_start = start; start 608 drivers/net/ethernet/sun/sunvnet_common.c ack_end = start; start 609 drivers/net/ethernet/sun/sunvnet_common.c start = vio_dring_next(dr, start); start 610 drivers/net/ethernet/sun/sunvnet_common.c if (ack && start != end) { start 623 drivers/net/ethernet/sun/sunvnet_common.c ack_end = vio_dring_prev(dr, start); start 939 drivers/net/ethernet/sun/sunvnet_common.c static int __vnet_tx_trigger(struct vnet_port *port, u32 start) start 950 drivers/net/ethernet/sun/sunvnet_common.c .start_idx = start, start 983 drivers/net/ethernet/sun/sunvnet_common.c port->vio._peer_sid, start, err); start 1132 drivers/net/ethernet/sun/sunvnet_common.c int start = 0, offset; start 1156 drivers/net/ethernet/sun/sunvnet_common.c start = skb_checksum_start_offset(skb); start 1157 drivers/net/ethernet/sun/sunvnet_common.c if (start) { start 1158 drivers/net/ethernet/sun/sunvnet_common.c int offset = start + nskb->csum_offset; start 1161 drivers/net/ethernet/sun/sunvnet_common.c if (skb_copy_bits(skb, 0, nskb->data, start)) { start 1169 drivers/net/ethernet/sun/sunvnet_common.c csum = skb_copy_and_csum_bits(skb, start, start 1170 drivers/net/ethernet/sun/sunvnet_common.c nskb->data + start, start 1171 drivers/net/ethernet/sun/sunvnet_common.c skb->len - start, 0); start 1181 drivers/net/ethernet/sun/sunvnet_common.c skb->len - start, start 1192 drivers/net/ethernet/sun/sunvnet_common.c skb->len - start, start 1389 drivers/net/ethernet/tehuti/tehuti.c BDX_ASSERT(*pptr < db->start || /* pointer has to be */ start 1394 drivers/net/ethernet/tehuti/tehuti.c *pptr = db->start; start 1429 drivers/net/ethernet/tehuti/tehuti.c d->start = vmalloc(memsz); start 1430 drivers/net/ethernet/tehuti/tehuti.c if (!d->start) start 1439 drivers/net/ethernet/tehuti/tehuti.c d->end = d->start + d->size + 1; /* just after last element */ start 1442 drivers/net/ethernet/tehuti/tehuti.c d->rptr = d->start; start 1443 drivers/net/ethernet/tehuti/tehuti.c d->wptr = d->start; start 1456 drivers/net/ethernet/tehuti/tehuti.c vfree(d->start); start 1457 drivers/net/ethernet/tehuti/tehuti.c d->start = NULL; start 198 drivers/net/ethernet/tehuti/tehuti.h struct tx_map *start; /* points to the first element */ start 918 drivers/net/ethernet/ti/cpmac.c if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { start 926 drivers/net/ethernet/ti/cpmac.c priv->regs = ioremap(mem->start, resource_size(mem)); start 1004 drivers/net/ethernet/ti/cpmac.c release_mem_region(mem->start, resource_size(mem)); start 1032 drivers/net/ethernet/ti/cpmac.c release_mem_region(mem->start, resource_size(mem)); start 1149 drivers/net/ethernet/ti/cpmac.c "mac: %pM\n", (void *)mem->start, dev->irq, start 2853 drivers/net/ethernet/ti/cpsw.c ss_res->start + CPSW2_BD_OFFSET, start 2954 drivers/net/ethernet/ti/cpsw.c &ss_res->start, cpsw->irqs_table[0], descs_pool_size); start 61 drivers/net/ethernet/ti/cpsw_ale.c static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) start 65 drivers/net/ethernet/ti/cpsw_ale.c idx = start / 32; start 66 drivers/net/ethernet/ti/cpsw_ale.c start -= idx * 32; start 68 drivers/net/ethernet/ti/cpsw_ale.c return 
(ale_entry[idx] >> start) & BITMASK(bits); start 71 drivers/net/ethernet/ti/cpsw_ale.c static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, start 77 drivers/net/ethernet/ti/cpsw_ale.c idx = start / 32; start 78 drivers/net/ethernet/ti/cpsw_ale.c start -= idx * 32; start 80 drivers/net/ethernet/ti/cpsw_ale.c ale_entry[idx] &= ~(BITMASK(bits) << start); start 81 drivers/net/ethernet/ti/cpsw_ale.c ale_entry[idx] |= (value << start); start 84 drivers/net/ethernet/ti/cpsw_ale.c #define DEFINE_ALE_FIELD(name, start, bits) \ start 87 drivers/net/ethernet/ti/cpsw_ale.c return cpsw_ale_get_field(ale_entry, start, bits); \ start 91 drivers/net/ethernet/ti/cpsw_ale.c cpsw_ale_set_field(ale_entry, start, bits, value); \ start 94 drivers/net/ethernet/ti/cpsw_ale.c #define DEFINE_ALE_FIELD1(name, start) \ start 97 drivers/net/ethernet/ti/cpsw_ale.c return cpsw_ale_get_field(ale_entry, start, bits); \ start 102 drivers/net/ethernet/ti/cpsw_ale.c cpsw_ale_set_field(ale_entry, start, bits, value); \ start 1440 drivers/net/ethernet/ti/davinci_emac.c for (irq_num = res->start; irq_num <= res->end; irq_num++) { start 1546 drivers/net/ethernet/ti/davinci_emac.c for (m = irq_num; m >= res->start; m--) start 1585 drivers/net/ethernet/ti/davinci_emac.c for (irq_num = res->start; irq_num <= res->end; irq_num++) start 1821 drivers/net/ethernet/ti/davinci_emac.c priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; start 1845 drivers/net/ethernet/ti/davinci_emac.c hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; start 1863 drivers/net/ethernet/ti/davinci_emac.c (u32 __force)res->start + pdata->ctrl_ram_offset; start 1894 drivers/net/ethernet/ti/davinci_emac.c ndev->irq = res->start; start 400 drivers/net/ethernet/ti/davinci_mdio.c data->regs = devm_ioremap(dev, res->start, resource_size(res)); start 1917 drivers/net/ethernet/ti/netcp_core.c unsigned int start; start 1920 drivers/net/ethernet/ti/netcp_core.c start = u64_stats_fetch_begin_irq(&p->syncp_rx); start 1923 drivers/net/ethernet/ti/netcp_core.c } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start)); start 1926 drivers/net/ethernet/ti/netcp_core.c start = u64_stats_fetch_begin_irq(&p->syncp_tx); start 1929 drivers/net/ethernet/ti/netcp_core.c } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start)); start 2015 drivers/net/ethernet/ti/netcp_core.c if (!devm_request_mem_region(dev, res.start, size, start 2022 drivers/net/ethernet/ti/netcp_core.c efuse = devm_ioremap_nocache(dev, res.start, size); start 2025 drivers/net/ethernet/ti/netcp_core.c devm_release_mem_region(dev, res.start, size); start 2037 drivers/net/ethernet/ti/netcp_core.c devm_release_mem_region(dev, res.start, size); start 398 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c u8 *start = buf; start 494 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c start[1] = (buf - start - 2); start 497 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c return buf - start; start 517 drivers/net/ethernet/toshiba/spider_net.c struct spider_net_descr *start = chain->tail; start 518 drivers/net/ethernet/toshiba/spider_net.c struct spider_net_descr *descr = start; start 524 drivers/net/ethernet/toshiba/spider_net.c } while (descr != start); start 981 drivers/net/ethernet/toshiba/spider_net.c struct spider_net_descr *start= chain->tail; start 982 drivers/net/ethernet/toshiba/spider_net.c struct spider_net_descr *descr= start; start 983 drivers/net/ethernet/toshiba/spider_net.c struct spider_net_hw_descr *hwd = start->hwdescr; start 990 
drivers/net/ethernet/toshiba/spider_net.c int off = start - chain->ring; start 1035 drivers/net/ethernet/toshiba/spider_net.c } while (descr != start); start 1042 drivers/net/ethernet/toshiba/spider_net.c descr = start; start 1061 drivers/net/ethernet/toshiba/spider_net.c } while (descr != start); start 2223 drivers/net/ethernet/via/via-rhine.c unsigned int start; start 2232 drivers/net/ethernet/via/via-rhine.c start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); start 2235 drivers/net/ethernet/via/via-rhine.c } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); start 2238 drivers/net/ethernet/via/via-rhine.c start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); start 2241 drivers/net/ethernet/via/via-rhine.c } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); start 2705 drivers/net/ethernet/via/via-velocity.c vptr->memaddr = res.start; start 277 drivers/net/ethernet/wiznet/w5100.c netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq); start 585 drivers/net/ethernet/wiznet/w5300.c netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); start 1334 drivers/net/ethernet/xilinx/ll_temac_main.c lp->regs = devm_ioremap_nocache(&pdev->dev, res->start, start 1429 drivers/net/ethernet/xilinx/ll_temac_main.c lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start, start 101 drivers/net/ethernet/xilinx/ll_temac_mdio.c (unsigned long long)res.start); start 1688 drivers/net/ethernet/xilinx/xilinx_axienet_main.c lp->regs_start = ethres->start; start 838 drivers/net/ethernet/xilinx/xilinx_emaclite.c if (lp->ndev->mem_start != res.start) { start 862 drivers/net/ethernet/xilinx/xilinx_emaclite.c (unsigned long long)res.start); start 1141 drivers/net/ethernet/xilinx/xilinx_emaclite.c ndev->irq = res->start; start 1150 drivers/net/ethernet/xilinx/xilinx_emaclite.c ndev->mem_start = res->start; start 623 drivers/net/ethernet/xircom/xirc2ps_cs.c if ((p_dev->resource[0]->start & 0xf) == 8) start 634 drivers/net/ethernet/xircom/xirc2ps_cs.c p_dev->resource[1]->start = p_dev->resource[0]->start; start 636 drivers/net/ethernet/xircom/xirc2ps_cs.c p_dev->resource[0]->start = ioaddr; start 647 drivers/net/ethernet/xircom/xirc2ps_cs.c resource_size_t tmp = p_dev->resource[1]->start; start 652 drivers/net/ethernet/xircom/xirc2ps_cs.c if ((p_dev->resource[0]->start & 0xf) == 8) start 663 drivers/net/ethernet/xircom/xirc2ps_cs.c p_dev->resource[1]->start = p_dev->resource[0]->start; start 664 drivers/net/ethernet/xircom/xirc2ps_cs.c p_dev->resource[0]->start = tmp; start 792 drivers/net/ethernet/xircom/xirc2ps_cs.c link->resource[0]->start = ioaddr; start 796 drivers/net/ethernet/xircom/xirc2ps_cs.c link->resource[0]->start = 0; /* let CS decide */ start 825 drivers/net/ethernet/xircom/xirc2ps_cs.c link->resource[1]->start & 0xff); start 830 drivers/net/ethernet/xircom/xirc2ps_cs.c (link->resource[1]->start >> 8) & 0xff); start 840 drivers/net/ethernet/xircom/xirc2ps_cs.c link->resource[2]->start = link->resource[2]->end = 0; start 844 drivers/net/ethernet/xircom/xirc2ps_cs.c local->dingo_ccr = ioremap(link->resource[2]->start, 0x1000) + 0x0800; start 852 drivers/net/ethernet/xircom/xirc2ps_cs.c ioaddr = link->resource[0]->start; start 899 drivers/net/ethernet/xircom/xirc2ps_cs.c dev->base_addr = link->resource[0]->start; start 787 drivers/net/ethernet/xscale/ixp4xx_eth.c int start; start 812 drivers/net/ethernet/xscale/ixp4xx_eth.c start = qmgr_stat_below_low_watermark(port->plat->txreadyq); start 814 drivers/net/ethernet/xscale/ixp4xx_eth.c if (start) { /* 
TX-ready queue was empty */ start 481 drivers/net/fddi/defxx.c bar_start[0] = to_tc_dev(bdev)->resource.start + start 1015 drivers/net/fddi/defxx.h PI_CMD_START_REQ start; start 1037 drivers/net/fddi/defxx.h PI_CMD_START_RSP start; start 1287 drivers/net/fddi/defza.c resource_size_t start, len; start 1312 drivers/net/fddi/defza.c start = tdev->resource.start; start 1313 drivers/net/fddi/defza.c len = tdev->resource.end - start + 1; start 1314 drivers/net/fddi/defza.c if (!request_mem_region(start, len, dev_name(bdev))) { start 1321 drivers/net/fddi/defza.c mmio = ioremap_nocache(start, len); start 1342 drivers/net/fddi/defza.c fp->name, (long long)tdev->resource.start, dev->irq); start 1504 drivers/net/fddi/defza.c release_mem_region(start, len); start 1518 drivers/net/fddi/defza.c resource_size_t start, len; start 1530 drivers/net/fddi/defza.c start = tdev->resource.start; start 1531 drivers/net/fddi/defza.c len = tdev->resource.end - start + 1; start 1532 drivers/net/fddi/defza.c release_mem_region(start, len); start 600 drivers/net/fddi/skfp/h/cmtdef.h void hwt_wait_time(struct s_smc *smc, u_long start, long duration); start 79 drivers/net/fddi/skfp/hwmtm.c static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start, start 321 drivers/net/fddi/skfp/hwmtm.c union s_fp_descr volatile *start, start 329 drivers/net/fddi/skfp/hwmtm.c DB_GEN(3, "descr ring starts at = %p", start); start 330 drivers/net/fddi/skfp/hwmtm.c for (i=count-1, d1=start; i ; i--) { start 340 drivers/net/fddi/skfp/hwmtm.c d1->r.rxd_next = &start->r ; start 341 drivers/net/fddi/skfp/hwmtm.c phys = mac_drv_virt2phys(smc,(void *)start) ; start 344 drivers/net/fddi/skfp/hwmtm.c for (i=count, d1=start; i ; i--) { start 223 drivers/net/fddi/skfp/hwt.c void hwt_wait_time(struct s_smc *smc, u_long start, long int duration) start 240 drivers/net/fddi/skfp/hwt.c diff = (long)(start - hwt_quick_read(smc)) ; start 251 drivers/net/fddi/skfp/hwt.c if (hwt_quick_read(smc) >= start) { start 257 drivers/net/fddi/skfp/hwt.c if (hwt_quick_read(smc) < start) { start 37 drivers/net/fjes/fjes_hw.c if (!request_mem_region(hw->hw_res.start, hw->hw_res.size, start 43 drivers/net/fjes/fjes_hw.c base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size); start 51 drivers/net/fjes/fjes_hw.c release_mem_region(hw->hw_res.start, hw->hw_res.size); start 310 drivers/net/fjes/fjes_hw.h u64 start; start 95 drivers/net/fjes/fjes_main.c .start = 0, start 100 drivers/net/fjes/fjes_main.c .start = 0, start 197 drivers/net/fjes/fjes_main.c res[0].start = addr->address.minimum; start 206 drivers/net/fjes/fjes_main.c res[1].start = irq->interrupts[0]; start 1265 drivers/net/fjes/fjes_main.c hw->hw_res.start = res->start; start 417 drivers/net/hamradio/bpqether.c .start = bpq_seq_start, start 301 drivers/net/hamradio/dmascc.c unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS], start 368 drivers/net/hamradio/dmascc.c start[i] = jiffies; start 390 drivers/net/hamradio/dmascc.c delay[i] = jiffies - start[i]; start 2082 drivers/net/hamradio/scc.c .start = scc_net_seq_start, start 824 drivers/net/hamradio/yam.c .start = yam_seq_start, start 773 drivers/net/hyperv/netvsc.c char *start = net_device->send_buf; start 774 drivers/net/hyperv/netvsc.c char *dest = start + (section_index * net_device->send_section_size) start 1169 drivers/net/hyperv/netvsc_drv.c unsigned int start; start 1172 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 1177 drivers/net/hyperv/netvsc_drv.c } while 
(u64_stats_fetch_retry_irq(&stats->syncp, start)); start 1199 drivers/net/hyperv/netvsc_drv.c unsigned int start; start 1202 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 1207 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 1221 drivers/net/hyperv/netvsc_drv.c unsigned int start; start 1225 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 1228 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 1235 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 1238 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 1272 drivers/net/hyperv/netvsc_drv.c unsigned int start; start 1276 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 1279 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 1286 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 1290 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 1409 drivers/net/hyperv/netvsc_drv.c unsigned int start; start 1427 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&qstats->syncp); start 1430 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); start 1436 drivers/net/hyperv/netvsc_drv.c start = u64_stats_fetch_begin_irq(&qstats->syncp); start 1439 drivers/net/hyperv/netvsc_drv.c } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); start 930 drivers/net/ieee802154/adf7242.c .start = adf7242_start, start 1315 drivers/net/ieee802154/at86rf230.c .start = at86rf230_start, start 781 drivers/net/ieee802154/atusb.c .start = atusb_start, start 2388 drivers/net/ieee802154/ca8210.c .start = ca8210_start, start 818 drivers/net/ieee802154/cc2520.c .start = cc2520_start, start 118 drivers/net/ieee802154/fakelb.c .start = fakelb_hw_start, start 172 drivers/net/ieee802154/mac802154_hwsim.c .start = hwsim_hw_start, start 760 drivers/net/ieee802154/mcr20a.c .start = mcr20a_start, start 1009 drivers/net/ieee802154/mrf24j40.c .start = mrf24j40_start, start 131 drivers/net/ifb.c unsigned int start; start 137 drivers/net/ifb.c start = u64_stats_fetch_begin_irq(&txp->rsync); start 140 drivers/net/ifb.c } while (u64_stats_fetch_retry_irq(&txp->rsync, start)); start 145 drivers/net/ifb.c start = u64_stats_fetch_begin_irq(&txp->tsync); start 148 drivers/net/ifb.c } while (u64_stats_fetch_retry_irq(&txp->tsync, start)); start 112 drivers/net/loopback.c unsigned int start; start 116 drivers/net/loopback.c start = u64_stats_fetch_begin_irq(&lb_stats->syncp); start 119 drivers/net/loopback.c } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start)); start 2208 drivers/net/macsec.c unsigned int start; start 2212 drivers/net/macsec.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 2214 drivers/net/macsec.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 2272 drivers/net/macsec.c unsigned int start; start 2276 drivers/net/macsec.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 2278 drivers/net/macsec.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 2312 drivers/net/macsec.c unsigned int start; start 2316 drivers/net/macsec.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 2318 drivers/net/macsec.c } while (u64_stats_fetch_retry_irq(&stats->syncp, 
start)); start 2941 drivers/net/macsec.c int start; start 2945 drivers/net/macsec.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 2950 drivers/net/macsec.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 923 drivers/net/macvlan.c unsigned int start; start 929 drivers/net/macvlan.c start = u64_stats_fetch_begin_irq(&p->syncp); start 935 drivers/net/macvlan.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 66 drivers/net/netdevsim/netdev.c unsigned int start; start 69 drivers/net/netdevsim/netdev.c start = u64_stats_fetch_begin(&ns->syncp); start 72 drivers/net/netdevsim/netdev.c } while (u64_stats_fetch_retry(&ns->syncp, start)); start 65 drivers/net/nlmon.c unsigned int start; start 70 drivers/net/nlmon.c start = u64_stats_fetch_begin_irq(&nl_stats->syncp); start 73 drivers/net/nlmon.c } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start)); start 329 drivers/net/phy/dp83640.c sec = clkreq->perout.start.sec; start 330 drivers/net/phy/dp83640.c nsec = clkreq->perout.start.nsec; start 239 drivers/net/phy/mdio-bcm-unimac.c priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 197 drivers/net/phy/mdio-mux-bcm-iproc.c if (res->start & 0xfff) { start 202 drivers/net/phy/mdio-mux-bcm-iproc.c res->start &= ~0xfff; start 203 drivers/net/phy/mdio-mux-bcm-iproc.c res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1; start 117 drivers/net/phy/mdio-mux-mmioreg.c s->phys = res.start; start 38 drivers/net/phy/mdio-octeon.c mdio_phys = res_mem->start; start 88 drivers/net/phy/mdio-thunder.c r.start - pci_resource_start(pdev, 0); start 94 drivers/net/phy/mdio-thunder.c snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start); start 103 drivers/net/phy/mdio-thunder.c dev_info(&pdev->dev, "Added bus at %llx\n", r.start); start 1120 drivers/net/phy/mscc.c static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size, start 1127 drivers/net/phy/mscc.c phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start); start 352 drivers/net/phy/sfp-bus.c bus->socket_ops->start(bus->sfp); start 419 drivers/net/phy/sfp-bus.c bus->socket_ops->start(bus->sfp); start 1769 drivers/net/phy/sfp.c .start = sfp_start, start 12 drivers/net/phy/sfp.h void (*start)(struct sfp *sfp); start 848 drivers/net/ppp/pppoe.c char *start; start 886 drivers/net/ppp/pppoe.c start = (char *)&ph->tag[0]; start 888 drivers/net/ppp/pppoe.c error = memcpy_from_msg(start, m, total_len); start 1096 drivers/net/ppp/pppoe.c .start = pppoe_seq_start, start 1844 drivers/net/team/team.c unsigned int start; start 1850 drivers/net/team/team.c start = u64_stats_fetch_begin_irq(&p->syncp); start 1856 drivers/net/team/team.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 465 drivers/net/team/team_mode_loadbalance.c unsigned int start; start 469 drivers/net/team/team_mode_loadbalance.c start = u64_stats_fetch_begin_irq(syncp); start 471 drivers/net/team/team_mode_loadbalance.c } while (u64_stats_fetch_retry_irq(syncp, start)); start 1165 drivers/net/tun.c unsigned int start; start 1169 drivers/net/tun.c start = u64_stats_fetch_begin(&p->syncp); start 1174 drivers/net/tun.c } while (u64_stats_fetch_retry(&p->syncp, start)); start 129 drivers/net/usb/qmi_wwan.c unsigned int start; start 142 drivers/net/usb/qmi_wwan.c start = u64_stats_fetch_begin_irq(&stats64->syncp); start 147 drivers/net/usb/qmi_wwan.c } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); start 989 drivers/net/usb/usbnet.c unsigned int start; start 1002 drivers/net/usb/usbnet.c start = 
u64_stats_fetch_begin_irq(&stats64->syncp); start 1007 drivers/net/usb/usbnet.c } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); start 159 drivers/net/veth.c unsigned int start; start 163 drivers/net/veth.c start = u64_stats_fetch_begin_irq(&rq_stats->syncp); start 168 drivers/net/veth.c } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); start 294 drivers/net/veth.c unsigned int start; start 297 drivers/net/veth.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 300 drivers/net/veth.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 318 drivers/net/veth.c unsigned int start; start 321 drivers/net/veth.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 325 drivers/net/veth.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 620 drivers/net/veth.c void *head, *start; start 633 drivers/net/veth.c start = head + VETH_XDP_HEADROOM; start 634 drivers/net/veth.c if (skb_copy_bits(skb, -mac_len, start, pktlen)) { start 1722 drivers/net/virtio_net.c unsigned int start; start 1731 drivers/net/virtio_net.c start = u64_stats_fetch_begin_irq(&sq->stats.syncp); start 1734 drivers/net/virtio_net.c } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); start 1737 drivers/net/virtio_net.c start = u64_stats_fetch_begin_irq(&rq->stats.syncp); start 1741 drivers/net/virtio_net.c } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); start 2125 drivers/net/virtio_net.c unsigned int idx = 0, start, i, j; start 2134 drivers/net/virtio_net.c start = u64_stats_fetch_begin_irq(&rq->stats.syncp); start 2139 drivers/net/virtio_net.c } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); start 2148 drivers/net/virtio_net.c start = u64_stats_fetch_begin_irq(&sq->stats.syncp); start 2153 drivers/net/virtio_net.c } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); start 87 drivers/net/vrf.c unsigned int start; start 91 drivers/net/vrf.c start = u64_stats_fetch_begin_irq(&dstats->syncp); start 97 drivers/net/vrf.c } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); start 72 drivers/net/vsockmon.c unsigned int start; start 77 drivers/net/vsockmon.c start = u64_stats_fetch_begin_irq(&vstats->syncp); start 80 drivers/net/vsockmon.c } while (u64_stats_fetch_retry_irq(&vstats->syncp, start)); start 708 drivers/net/vxlan.c size_t start, offset; start 716 drivers/net/vxlan.c start = vxlan_rco_start(vni_field); start 717 drivers/net/vxlan.c offset = start + vxlan_rco_offset(vni_field); start 720 drivers/net/vxlan.c start, offset, grc, nopartial); start 1514 drivers/net/vxlan.c size_t start, offset; start 1519 drivers/net/vxlan.c start = vxlan_rco_start(unparsed->vx_vni); start 1520 drivers/net/vxlan.c offset = start + vxlan_rco_offset(unparsed->vx_vni); start 1525 drivers/net/vxlan.c skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, start 2193 drivers/net/vxlan.c unsigned int start; start 2195 drivers/net/vxlan.c start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); start 2196 drivers/net/vxlan.c vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); start 433 drivers/net/wan/farsync.c int start; /* Indication of start/stop to network */ start 973 drivers/net/wan/farsync.c port->start = 0; start 1054 drivers/net/wan/farsync.c port->start = 0; start 1407 drivers/net/wan/farsync.c if (port->start) { start 1411 drivers/net/wan/farsync.c port->start = 0; start 2257 drivers/net/wan/farsync.c port->start = 0; start 2312 drivers/net/wan/farsync.c port->start = 1; /* I'm using this to signal stop 
sent up */ start 1085 drivers/net/wan/fsl_ucc_hdlc.c *ptr = ioremap(res->start, resource_size(res)); start 1170 drivers/net/wan/fsl_ucc_hdlc.c ut_info->uf_info.regs = res.start; start 73 drivers/net/wan/hdlc.c if (hdlc->proto->start) start 74 drivers/net/wan/hdlc.c hdlc->proto->start(dev); start 314 drivers/net/wan/hdlc_cisco.c .start = cisco_start, start 1168 drivers/net/wan/hdlc_fr.c .start = fr_start, start 634 drivers/net/wan/hdlc_ppp.c .start = ppp_start, start 799 drivers/net/wan/ixp4xx_hss.c int start; start 814 drivers/net/wan/ixp4xx_hss.c start = qmgr_stat_below_low_watermark(port->plat->txreadyq); start 817 drivers/net/wan/ixp4xx_hss.c if (start) { /* TX-ready queue was empty */ start 248 drivers/net/wan/sdla.c unsigned long start, done, now; start 251 drivers/net/wan/sdla.c start = now = jiffies; start 267 drivers/net/wan/sdla.c return time_before(jiffies, done) ? jiffies - start : -1; start 1767 drivers/net/wireless/admtek/adm8211.c .start = adm8211_start, start 1351 drivers/net/wireless/ath/ar5523/ar5523.c .start = ar5523_start, start 716 drivers/net/wireless/ath/ath10k/ahb.c .start = ath10k_ahb_hif_start, start 532 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 544 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x0, start 559 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x27000, start 575 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 585 drivers/net/wireless/ath/ath10k/coredump.c .start = 0xa0000, start 595 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x800, start 608 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 618 drivers/net/wireless/ath/ath10k/coredump.c .start = 0xa0000, start 628 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x800, start 640 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x00980000, start 650 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x00a00000, start 663 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 673 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x4000, start 683 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x8000, start 696 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 706 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x98000, start 716 drivers/net/wireless/ath/ath10k/coredump.c .start = 0xC0000, start 726 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x30000, start 736 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x3f000, start 746 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x43000, start 756 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x4A000, start 766 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x80000, start 779 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 789 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x98000, start 799 drivers/net/wireless/ath/ath10k/coredump.c .start = 0xC0000, start 809 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x30000, start 819 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x3f000, start 829 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x43000, start 839 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x4A000, start 849 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x80000, start 874 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x400000, start 884 drivers/net/wireless/ath/ath10k/coredump.c .start = 0xC0000, start 894 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x98000, start 904 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x30000, 
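Note: the many "unsigned int start" / u64_stats_fetch_begin_irq() / u64_stats_fetch_retry_irq() entries listed above (netvsc_drv.c, ifb.c, loopback.c, macsec.c, macvlan.c, team.c, veth.c, virtio_net.c, vrf.c, vsockmon.c, qmi_wwan.c, usbnet.c and others) all use "start" the same way: as the sequence-counter snapshot for a lock-free read of 64-bit per-CPU statistics. Below is a minimal sketch of that retry loop, assuming a hypothetical per-CPU stats structure (demo_pcpu_stats with rx_packets/rx_bytes fields); it is illustrative only and not copied from any of the drivers indexed here.

    /* Sketch of the u64_stats fetch/retry pattern seen in the entries above. */
    #include <linux/u64_stats_sync.h>
    #include <linux/types.h>

    struct demo_pcpu_stats {              /* hypothetical per-CPU stats block */
            u64 rx_packets;
            u64 rx_bytes;
            struct u64_stats_sync syncp;  /* seqcount protecting the counters */
    };

    /* Snapshot a consistent {packets, bytes} pair from one per-CPU block. */
    static void demo_read_stats(const struct demo_pcpu_stats *stats,
                                u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    /* "start" records the sequence count before reading. */
                    start = u64_stats_fetch_begin_irq(&stats->syncp);
                    *packets = stats->rx_packets;
                    *bytes   = stats->rx_bytes;
                    /* Retry if a writer ran u64_stats_update_begin/end() meanwhile. */
            } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
    }

On 64-bit kernels the begin/retry helpers compile away to nothing; on 32-bit kernels they guard against torn reads of the 64-bit counters, which is why every ndo_get_stats64 implementation indexed above wraps its copies in this loop.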
start 914 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x3f000, start 924 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x43000, start 934 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x4A000, start 944 drivers/net/wireless/ath/ath10k/coredump.c .start = 0x080000, start 98 drivers/net/wireless/ath/ath10k/coredump.h __le32 start; start 130 drivers/net/wireless/ath/ath10k/coredump.h u32 start; start 141 drivers/net/wireless/ath/ath10k/coredump.h u32 start; start 50 drivers/net/wireless/ath/ath10k/hif.h int (*start)(struct ath10k *ar); start 132 drivers/net/wireless/ath/ath10k/hif.h return ar->hif.ops->start(ar); start 8186 drivers/net/wireless/ath/ath10k/mac.c .start = ath10k_start, start 1481 drivers/net/wireless/ath/ath10k/pci.c if (mem_region->start > cur_section->start) { start 1483 drivers/net/wireless/ath/ath10k/pci.c mem_region->start, cur_section->start); start 1487 drivers/net/wireless/ath/ath10k/pci.c skip_size = cur_section->start - mem_region->start; start 1500 drivers/net/wireless/ath/ath10k/pci.c section_size = cur_section->end - cur_section->start; start 1504 drivers/net/wireless/ath/ath10k/pci.c cur_section->start, start 1516 drivers/net/wireless/ath/ath10k/pci.c if (cur_section->end > next_section->start) { start 1518 drivers/net/wireless/ath/ath10k/pci.c next_section->start, start 1523 drivers/net/wireless/ath/ath10k/pci.c skip_size = next_section->start - cur_section->end; start 1534 drivers/net/wireless/ath/ath10k/pci.c ret = ath10k_pci_diag_read_mem(ar, cur_section->start, start 1538 drivers/net/wireless/ath/ath10k/pci.c cur_section->start, ret); start 1590 drivers/net/wireless/ath/ath10k/pci.c base_addr += region->start; start 1617 drivers/net/wireless/ath/ath10k/pci.c *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i); start 1643 drivers/net/wireless/ath/ath10k/pci.c current_region->start, start 1698 drivers/net/wireless/ath/ath10k/pci.c shift = current_region->start >> 20; start 1734 drivers/net/wireless/ath/ath10k/pci.c hdr->start = cpu_to_le32(current_region->start); start 3073 drivers/net/wireless/ath/ath10k/pci.c .start = ath10k_pci_hif_start, start 972 drivers/net/wireless/ath/ath10k/qmi.c qmi->msa_pa = r.start; start 977 drivers/net/wireless/ath/ath10k/qmi.c dev_err(dev, "failed to map memory region: %pa\n", &r.start); start 1943 drivers/net/wireless/ath/ath10k/sdio.c .start = ath10k_sdio_hif_start, start 1109 drivers/net/wireless/ath/ath10k/snoc.c .start = ath10k_snoc_hif_start, start 1241 drivers/net/wireless/ath/ath10k/snoc.c ar_snoc->mem_pa = res->start; start 1257 drivers/net/wireless/ath/ath10k/snoc.c ar_snoc->ce_irqs[i].irq_line = res->start; start 736 drivers/net/wireless/ath/ath10k/usb.c .start = ath10k_usb_hif_start, start 109 drivers/net/wireless/ath/ath5k/ahb.c mem = ioremap_nocache(res->start, resource_size(res)); start 123 drivers/net/wireless/ath/ath5k/ahb.c irq = res->start; start 165 drivers/net/wireless/ath/ath5k/debug.c .start = reg_start, start 782 drivers/net/wireless/ath/ath5k/mac80211-ops.c .start = ath5k_start, start 35 drivers/net/wireless/ath/ath6kl/htc-ops.h return target->dev->ar->htc_ops->start(target); start 551 drivers/net/wireless/ath/ath6kl/htc.h int (*start)(struct htc_target *target); start 2918 drivers/net/wireless/ath/ath6kl/htc_mbox.c .start = ath6kl_htc_mbox_start, start 1707 drivers/net/wireless/ath/ath6kl/htc_pipe.c .start = ath6kl_htc_pipe_start, start 95 drivers/net/wireless/ath/ath9k/ahb.c mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); start 107 
drivers/net/wireless/ath/ath9k/ahb.c irq = res->start; start 920 drivers/net/wireless/ath/ath9k/debug.c u32 start; start 940 drivers/net/wireless/ath/ath9k/debug.c if (reg_hole_list[j].start == i << 2) { start 523 drivers/net/wireless/ath/ath9k/hif_usb.c .start = hif_usb_start, start 1871 drivers/net/wireless/ath/ath9k/htc_drv_main.c .start = ath9k_htc_start, start 314 drivers/net/wireless/ath/ath9k/htc_hst.c target->hif->start(target->hif_dev); start 36 drivers/net/wireless/ath/ath9k/htc_hst.h void (*start) (void *hif_handle); start 232 drivers/net/wireless/ath/ath9k/main.c static bool ath_complete_reset(struct ath_softc *sc, bool start) start 245 drivers/net/wireless/ath/ath9k/main.c if (!sc->cur_chan->offchannel && start) { start 2665 drivers/net/wireless/ath/ath9k/main.c .start = ath9k_start, start 320 drivers/net/wireless/ath/ath9k/mci.c if (info->start) { start 404 drivers/net/wireless/ath/ath9k/mci.c profile_info.start); start 112 drivers/net/wireless/ath/ath9k/mci.h bool start; start 176 drivers/net/wireless/ath/ath9k/tx99.c bool start; start 192 drivers/net/wireless/ath/ath9k/tx99.c if (strtobool(buf, &start)) start 197 drivers/net/wireless/ath/ath9k/tx99.c if (start == sc->tx99_state) { start 198 drivers/net/wireless/ath/ath9k/tx99.c if (!start) start 204 drivers/net/wireless/ath/ath9k/tx99.c if (!start) { start 1751 drivers/net/wireless/ath/carl9170/main.c .start = carl9170_op_start, start 1138 drivers/net/wireless/ath/wcn36xx/main.c .start = wcn36xx_start, start 1228 drivers/net/wireless/ath/wcn36xx/main.c wcn->tx_irq = res->start; start 1236 drivers/net/wireless/ath/wcn36xx/main.c wcn->rx_irq = res->start; start 254 drivers/net/wireless/ath/wcn36xx/smd.c unsigned long start; start 262 drivers/net/wireless/ath/wcn36xx/smd.c start = jiffies; start 278 drivers/net/wireless/ath/wcn36xx/smd.c jiffies_to_msecs(jiffies - start)); start 690 drivers/net/wireless/ath/wil6210/wmi.h u8 start; start 2188 drivers/net/wireless/atmel/at76c50x-usb.c .start = at76_mac80211_start, start 3519 drivers/net/wireless/atmel/atmel.c start(priv, BSS_TYPE_AD_HOC); start 146 drivers/net/wireless/atmel/atmel_cs.c link->resource[0]->start, start 49 drivers/net/wireless/atmel/atmel_pci.c dev = init_atmel_card(pdev->irq, pdev->resource[1].start, start 88 drivers/net/wireless/broadcom/b43/dma.c int start, int end, int irq) start 105 drivers/net/wireless/broadcom/b43/dma.c if (start) start 180 drivers/net/wireless/broadcom/b43/dma.c int start, int end, int irq) start 197 drivers/net/wireless/broadcom/b43/dma.c if (start) start 205 drivers/net/wireless/broadcom/b43/dma.h dma_addr_t dmaaddr, u16 bufsize, int start, start 5183 drivers/net/wireless/broadcom/b43/main.c .start = b43_op_start, start 301 drivers/net/wireless/broadcom/b43/phy_g.c u16 start = 0x08, end = 0x18; start 306 drivers/net/wireless/broadcom/b43/phy_g.c start = 0x10; start 316 drivers/net/wireless/broadcom/b43/phy_g.c for (i = start; i < end; i++) start 333 drivers/net/wireless/broadcom/b43/phy_g.c u16 start = 0x0008, end = 0x0018; start 336 drivers/net/wireless/broadcom/b43/phy_g.c start = 0x0010; start 351 drivers/net/wireless/broadcom/b43/phy_g.c for (i = start; i < end; i++) start 352 drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_write16(dev, table, i, i - start); start 2289 drivers/net/wireless/broadcom/b43/phy_g.c unsigned int i, j, start, end; start 2300 drivers/net/wireless/broadcom/b43/phy_g.c start = (channel - 5 > 0) ? 
channel - 5 : 1; start 2303 drivers/net/wireless/broadcom/b43/phy_g.c for (i = start; i <= end; i++) { start 48 drivers/net/wireless/broadcom/b43legacy/dma.c int start, int end, int irq) start 67 drivers/net/wireless/broadcom/b43legacy/dma.c if (start) start 3541 drivers/net/wireless/broadcom/b43legacy/main.c .start = b43legacy_op_start, start 137 drivers/net/wireless/broadcom/b43legacy/radio.c u16 start = 0x08; start 144 drivers/net/wireless/broadcom/b43legacy/radio.c start = 0x10; start 151 drivers/net/wireless/broadcom/b43legacy/radio.c for (i = start; i < end; i++) start 175 drivers/net/wireless/broadcom/b43legacy/radio.c u16 start = 0x0008; start 180 drivers/net/wireless/broadcom/b43legacy/radio.c start = 0x0010; start 192 drivers/net/wireless/broadcom/b43legacy/radio.c for (i = start; i < end; i++) start 193 drivers/net/wireless/broadcom/b43legacy/radio.c b43legacy_ilt_write(dev, offset + i, i - start); start 271 drivers/net/wireless/broadcom/b43legacy/radio.c unsigned int start; start 286 drivers/net/wireless/broadcom/b43legacy/radio.c start = (channel - 5 > 0) ? channel - 5 : 1; start 289 drivers/net/wireless/broadcom/b43legacy/radio.c for (i = start; i <= end; i++) { start 1655 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c u8 start, u8 end, start 1667 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (rfi->pktslots[start]) { start 1668 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c __skb_queue_tail(skb_list, rfi->pktslots[start]); start 1669 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c rfi->pktslots[start] = NULL; start 1671 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c start++; start 1672 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (start > rfi->max_idx) start 1673 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c start = 0; start 1674 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c } while (start != end); start 1471 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c u16 start, end, i; start 1486 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c start = di->txin; start 1508 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c if ((start == 0) && (end > di->txout)) start 1511 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c for (i = start; i != end && !txp; i = nexttxd(di, i)) { start 1539 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c start, end, di->txout); start 967 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c .start = brcms_ops_start, start 24650 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlc_phy_a1_nphy(struct brcms_phy *pi, u8 core, u32 winsz, u32 start, start 24655 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c sz = end - start + 1; start 24691 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c } while (end-- != start); start 24696 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c NPHY_TBL_ID_EPSILONTBL1, sz, start, 32, dst); start 1107 drivers/net/wireless/cisco/airo.c static int get_dec_u16( char *buffer, int *start, int limit ); start 4767 drivers/net/wireless/cisco/airo.c static int get_dec_u16( char *buffer, int *start, int limit ) { start 4770 drivers/net/wireless/cisco/airo.c for (value = 0; *start < limit && buffer[*start] >= '0' && start 4771 drivers/net/wireless/cisco/airo.c buffer[*start] <= '9'; (*start)++) { start 4774 drivers/net/wireless/cisco/airo.c value += buffer[*start] - '0'; start 5563 drivers/net/wireless/cisco/airo.c dev = _init_airo_card(pdev->irq, 
pdev->resource[0].start, 0, pdev, &pdev->dev); start 5565 drivers/net/wireless/cisco/airo.c dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev); start 124 drivers/net/wireless/cisco/airo_cs.c link->resource[0]->start, 1, &link->dev); start 2734 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) start 2739 drivers/net/wireless/intel/ipw2x00/ipw2200.c _ipw_write32(priv, IPW_AUTOINC_ADDR, start); start 3226 drivers/net/wireless/intel/ipw2x00/ipw2200.c u8 *start; start 3233 drivers/net/wireless/intel/ipw2x00/ipw2200.c start = data + offset; start 3245 drivers/net/wireless/intel/ipw2x00/ipw2200.c memcpy(virts[total_nr], start, size); start 3246 drivers/net/wireless/intel/ipw2x00/ipw2200.c start += size; start 6165 drivers/net/wireless/intel/ipw2x00/ipw2200.c int start = channel_index; start 6179 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (start != channel_index) { start 6180 drivers/net/wireless/intel/ipw2x00/ipw2200.c scan->channels_list[start] = (u8) (IPW_A_MODE << 6) | start 6181 drivers/net/wireless/intel/ipw2x00/ipw2200.c (channel_index - start); start 6187 drivers/net/wireless/intel/ipw2x00/ipw2200.c int start = channel_index; start 6247 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (start != channel_index) { start 6248 drivers/net/wireless/intel/ipw2x00/ipw2200.c scan->channels_list[start] = (u8) (IPW_B_MODE << 6) | start 6249 drivers/net/wireless/intel/ipw2x00/ipw2200.c (channel_index - start); start 34 drivers/net/wireless/intel/ipw2x00/libipw_wx.c static inline unsigned int elapsed_jiffies_msecs(unsigned long start) start 38 drivers/net/wireless/intel/ipw2x00/libipw_wx.c if (end >= start) start 39 drivers/net/wireless/intel/ipw2x00/libipw_wx.c return jiffies_to_msecs(end - start); start 41 drivers/net/wireless/intel/ipw2x00/libipw_wx.c return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); start 46 drivers/net/wireless/intel/ipw2x00/libipw_wx.c char *start, char *stop, start 61 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); start 69 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_point(info, start, stop, start 76 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); start 86 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_event(info, start, stop, start 96 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); start 105 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_point(info, start, stop, start 111 drivers/net/wireless/intel/ipw2x00/libipw_wx.c current_val = start + iwe_stream_lcp_len(info); start 126 drivers/net/wireless/intel/ipw2x00/libipw_wx.c current_val = iwe_stream_add_value(info, start, current_val, start 134 drivers/net/wireless/intel/ipw2x00/libipw_wx.c current_val = iwe_stream_add_value(info, start, current_val, start 138 drivers/net/wireless/intel/ipw2x00/libipw_wx.c if ((current_val - start) > iwe_stream_lcp_len(info)) start 139 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = current_val; start 185 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); start 192 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, custom); start 200 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = 
iwe_stream_add_point(info, start, stop, &iwe, buf); start 209 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 221 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, custom); start 242 drivers/net/wireless/intel/ipw2x00/libipw_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, custom); start 245 drivers/net/wireless/intel/ipw2x00/libipw_wx.c return start; start 3446 drivers/net/wireless/intel/iwlegacy/3945-mac.c .start = il3945_mac_start, start 2570 drivers/net/wireless/intel/iwlegacy/4965-mac.c int start = 0; start 2575 drivers/net/wireless/intel/iwlegacy/4965-mac.c start = IL_STA_ID; start 2581 drivers/net/wireless/intel/iwlegacy/4965-mac.c for (i = start; i < il->hw_params.max_stations; i++) start 2689 drivers/net/wireless/intel/iwlegacy/4965-mac.c int start = agg->start_idx; start 2724 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = idx - start; start 2726 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = (start - idx) + 0xff; start 2729 drivers/net/wireless/intel/iwlegacy/4965-mac.c start = idx; start 2731 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = 0xff - (start - idx); start 2733 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = start - idx; start 2734 drivers/net/wireless/intel/iwlegacy/4965-mac.c start = idx; start 2739 drivers/net/wireless/intel/iwlegacy/4965-mac.c D_TX_REPLY("start=%d bitmap=0x%llx\n", start, start 2744 drivers/net/wireless/intel/iwlegacy/4965-mac.c agg->start_idx = start; start 6315 drivers/net/wireless/intel/iwlegacy/4965-mac.c .start = il4965_mac_start, start 3164 drivers/net/wireless/intel/iwlwifi/dvm/commands.h __le32 start; start 1577 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c .start = iwlagn_mac_start, start 720 drivers/net/wireless/intel/iwlwifi/dvm/main.c calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg); start 2134 drivers/net/wireless/intel/iwlwifi/dvm/main.c .start = iwl_op_mode_dvm_start, start 96 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; start 366 drivers/net/wireless/intel/iwlwifi/fw/dbg.c u32 start, end; start 370 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00000, .end = 0x00a00000 }, start 371 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0000c, .end = 0x00a00024 }, start 372 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0002c, .end = 0x00a0003c }, start 373 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00410, .end = 0x00a00418 }, start 374 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00420, .end = 0x00a00420 }, start 375 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00428, .end = 0x00a00428 }, start 376 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00430, .end = 0x00a0043c }, start 377 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00444, .end = 0x00a00444 }, start 378 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a004c0, .end = 0x00a004cc }, start 379 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a004d8, .end = 0x00a004d8 }, start 380 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a004e0, .end = 0x00a004f0 }, start 381 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00840, .end = 0x00a00840 }, start 382 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00850, .end = 0x00a00858 }, start 383 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 
0x00a01004, .end = 0x00a01008 }, start 384 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01010, .end = 0x00a01010 }, start 385 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01018, .end = 0x00a01018 }, start 386 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01024, .end = 0x00a01024 }, start 387 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0102c, .end = 0x00a01034 }, start 388 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0103c, .end = 0x00a01040 }, start 389 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01048, .end = 0x00a01094 }, start 390 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c00, .end = 0x00a01c20 }, start 391 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c58, .end = 0x00a01c58 }, start 392 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c7c, .end = 0x00a01c7c }, start 393 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c28, .end = 0x00a01c54 }, start 394 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c5c, .end = 0x00a01c5c }, start 395 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c60, .end = 0x00a01cdc }, start 396 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01ce0, .end = 0x00a01d0c }, start 397 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01d18, .end = 0x00a01d20 }, start 398 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01d2c, .end = 0x00a01d30 }, start 399 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01d40, .end = 0x00a01d5c }, start 400 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01d80, .end = 0x00a01d80 }, start 401 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01d98, .end = 0x00a01d9c }, start 402 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01da8, .end = 0x00a01da8 }, start 403 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01db8, .end = 0x00a01df4 }, start 404 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01dc0, .end = 0x00a01dfc }, start 405 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e00, .end = 0x00a01e2c }, start 406 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e40, .end = 0x00a01e60 }, start 407 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e68, .end = 0x00a01e6c }, start 408 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e74, .end = 0x00a01e74 }, start 409 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e84, .end = 0x00a01e90 }, start 410 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e9c, .end = 0x00a01ec4 }, start 411 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01ed0, .end = 0x00a01ee0 }, start 412 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01f00, .end = 0x00a01f1c }, start 413 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01f44, .end = 0x00a01ffc }, start 414 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02000, .end = 0x00a02048 }, start 415 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02068, .end = 0x00a020f0 }, start 416 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02100, .end = 0x00a02118 }, start 417 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02140, .end = 0x00a0214c }, start 418 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02168, .end = 0x00a0218c }, start 419 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a021c0, .end = 
0x00a021c0 }, start 420 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02400, .end = 0x00a02410 }, start 421 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02418, .end = 0x00a02420 }, start 422 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02428, .end = 0x00a0242c }, start 423 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02434, .end = 0x00a02434 }, start 424 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02440, .end = 0x00a02460 }, start 425 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02468, .end = 0x00a024b0 }, start 426 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a024c8, .end = 0x00a024cc }, start 427 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02500, .end = 0x00a02504 }, start 428 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0250c, .end = 0x00a02510 }, start 429 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02540, .end = 0x00a02554 }, start 430 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02580, .end = 0x00a025f4 }, start 431 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02600, .end = 0x00a0260c }, start 432 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02648, .end = 0x00a02650 }, start 433 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02680, .end = 0x00a02680 }, start 434 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a026c0, .end = 0x00a026d0 }, start 435 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02700, .end = 0x00a0270c }, start 436 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02804, .end = 0x00a02804 }, start 437 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02818, .end = 0x00a0281c }, start 438 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02c00, .end = 0x00a02db4 }, start 439 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02df4, .end = 0x00a02fb0 }, start 440 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03000, .end = 0x00a03014 }, start 441 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0301c, .end = 0x00a0302c }, start 442 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03034, .end = 0x00a03038 }, start 443 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03040, .end = 0x00a03048 }, start 444 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03060, .end = 0x00a03068 }, start 445 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03070, .end = 0x00a03074 }, start 446 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0307c, .end = 0x00a0307c }, start 447 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03080, .end = 0x00a03084 }, start 448 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0308c, .end = 0x00a03090 }, start 449 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03098, .end = 0x00a03098 }, start 450 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030a0, .end = 0x00a030a0 }, start 451 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030a8, .end = 0x00a030b4 }, start 452 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030bc, .end = 0x00a030bc }, start 453 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030c0, .end = 0x00a0312c }, start 454 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03c00, .end = 0x00a03c5c }, start 455 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04400, .end = 0x00a04454 }, start 456 
drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04460, .end = 0x00a04474 }, start 457 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a044c0, .end = 0x00a044ec }, start 458 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04500, .end = 0x00a04504 }, start 459 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04510, .end = 0x00a04538 }, start 460 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04540, .end = 0x00a04548 }, start 461 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04560, .end = 0x00a0457c }, start 462 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04590, .end = 0x00a04598 }, start 463 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a045c0, .end = 0x00a045f4 }, start 467 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a05c00, .end = 0x00a05c18 }, start 468 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a05400, .end = 0x00a056e8 }, start 469 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a08000, .end = 0x00a098bc }, start 470 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02400, .end = 0x00a02758 }, start 471 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04764, .end = 0x00a0476c }, start 472 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04770, .end = 0x00a04774 }, start 473 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04620, .end = 0x00a04624 }, start 477 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00000, .end = 0x00a00000 }, start 478 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0000c, .end = 0x00a00024 }, start 479 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0002c, .end = 0x00a00034 }, start 480 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0003c, .end = 0x00a0003c }, start 481 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00410, .end = 0x00a00418 }, start 482 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00420, .end = 0x00a00420 }, start 483 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00428, .end = 0x00a00428 }, start 484 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00430, .end = 0x00a0043c }, start 485 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00444, .end = 0x00a00444 }, start 486 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00840, .end = 0x00a00840 }, start 487 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a00850, .end = 0x00a00858 }, start 488 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01004, .end = 0x00a01008 }, start 489 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01010, .end = 0x00a01010 }, start 490 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01018, .end = 0x00a01018 }, start 491 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01024, .end = 0x00a01024 }, start 492 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0102c, .end = 0x00a01034 }, start 493 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0103c, .end = 0x00a01040 }, start 494 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01048, .end = 0x00a01050 }, start 495 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01058, .end = 0x00a01058 }, start 496 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01060, .end = 0x00a01070 }, start 497 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0108c, .end = 0x00a0108c }, start 498 
drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01c20, .end = 0x00a01c28 }, start 499 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01d10, .end = 0x00a01d10 }, start 500 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e28, .end = 0x00a01e2c }, start 501 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e60, .end = 0x00a01e60 }, start 502 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01e80, .end = 0x00a01e80 }, start 503 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a01ea0, .end = 0x00a01ea0 }, start 504 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02000, .end = 0x00a0201c }, start 505 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02024, .end = 0x00a02024 }, start 506 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02040, .end = 0x00a02048 }, start 507 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a020c0, .end = 0x00a020e0 }, start 508 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02400, .end = 0x00a02404 }, start 509 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0240c, .end = 0x00a02414 }, start 510 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0241c, .end = 0x00a0243c }, start 511 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02448, .end = 0x00a024bc }, start 512 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a024c4, .end = 0x00a024cc }, start 513 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02508, .end = 0x00a02508 }, start 514 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02510, .end = 0x00a02514 }, start 515 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0251c, .end = 0x00a0251c }, start 516 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0252c, .end = 0x00a0255c }, start 517 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02564, .end = 0x00a025a0 }, start 518 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a025a8, .end = 0x00a025b4 }, start 519 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a025c0, .end = 0x00a025c0 }, start 520 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a025e8, .end = 0x00a025f4 }, start 521 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02c08, .end = 0x00a02c18 }, start 522 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02c2c, .end = 0x00a02c38 }, start 523 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a02c68, .end = 0x00a02c78 }, start 524 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03000, .end = 0x00a03000 }, start 525 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03010, .end = 0x00a03014 }, start 526 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0301c, .end = 0x00a0302c }, start 527 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03034, .end = 0x00a03038 }, start 528 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03040, .end = 0x00a03044 }, start 529 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03060, .end = 0x00a03068 }, start 530 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03070, .end = 0x00a03070 }, start 531 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0307c, .end = 0x00a03084 }, start 532 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0308c, .end = 0x00a03090 }, start 533 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03098, .end = 0x00a03098 }, start 534 
drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030a0, .end = 0x00a030a0 }, start 535 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030a8, .end = 0x00a030b4 }, start 536 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030bc, .end = 0x00a030c0 }, start 537 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a030c8, .end = 0x00a030f4 }, start 538 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03100, .end = 0x00a0312c }, start 539 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a03c00, .end = 0x00a03c5c }, start 540 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04400, .end = 0x00a04454 }, start 541 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04460, .end = 0x00a04474 }, start 542 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a044c0, .end = 0x00a044ec }, start 543 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04500, .end = 0x00a04504 }, start 544 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04510, .end = 0x00a04538 }, start 545 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04540, .end = 0x00a04548 }, start 546 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04560, .end = 0x00a04560 }, start 547 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04570, .end = 0x00a0457c }, start 548 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04590, .end = 0x00a04590 }, start 549 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a04598, .end = 0x00a04598 }, start 550 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a045c0, .end = 0x00a045f4 }, start 551 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a05c18, .end = 0x00a05c1c }, start 552 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c000, .end = 0x00a0c018 }, start 553 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c020, .end = 0x00a0c028 }, start 554 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c038, .end = 0x00a0c094 }, start 555 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c0c0, .end = 0x00a0c104 }, start 556 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c10c, .end = 0x00a0c118 }, start 557 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c150, .end = 0x00a0c174 }, start 558 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c17c, .end = 0x00a0c188 }, start 559 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c190, .end = 0x00a0c198 }, start 560 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, start 561 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, start 565 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00d03c00, .end = 0x00d03c64 }, start 566 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00d05c18, .end = 0x00d05c1c }, start 567 drivers/net/wireless/intel/iwlwifi/fw/dbg.c { .start = 0x00d0c000, .end = 0x00d0c174 }, start 570 drivers/net/wireless/intel/iwlwifi/fw/dbg.c static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, start 576 drivers/net/wireless/intel/iwlwifi/fw/dbg.c *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); start 601 drivers/net/wireless/intel/iwlwifi/fw/dbg.c iwl_prph_dump_addr[i].start + 4; start 607 drivers/net/wireless/intel/iwlwifi/fw/dbg.c prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); start 609 drivers/net/wireless/intel/iwlwifi/fw/dbg.c iwl_read_prph_block(trans, 
iwl_prph_dump_addr[i].start, start 612 drivers/net/wireless/intel/iwlwifi/fw/dbg.c iwl_prph_dump_addr[i].start + 4, start 673 drivers/net/wireless/intel/iwlwifi/fw/dbg.c iwl_prph_dump_addr[i].start + 4; start 1286 drivers/net/wireless/intel/iwlwifi/iwl-drv.c op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); start 145 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h struct iwl_op_mode *(*start)(struct iwl_trans *trans, start 5012 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c .start = iwl_mvm_mac_start, start 1092 drivers/net/wireless/intel/iwlwifi/mvm/ops.c int hw_queue, bool start) start 1118 drivers/net/wireless/intel/iwlwifi/mvm/ops.c if (!start) start 1142 drivers/net/wireless/intel/iwlwifi/mvm/ops.c mvmtxq->stopped = !start; start 1144 drivers/net/wireless/intel/iwlwifi/mvm/ops.c if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) start 1346 drivers/net/wireless/intel/iwlwifi/mvm/ops.c .start = iwl_op_mode_mvm_start, \ start 2525 drivers/net/wireless/intel/iwlwifi/mvm/sta.c int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) start 2535 drivers/net/wireless/intel/iwlwifi/mvm/sta.c if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { start 2540 drivers/net/wireless/intel/iwlwifi/mvm/sta.c if (iwl_mvm_has_new_rx_api(mvm) && start) { start 2584 drivers/net/wireless/intel/iwlwifi/mvm/sta.c if (start) { start 2591 drivers/net/wireless/intel/iwlwifi/mvm/sta.c cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : start 2604 drivers/net/wireless/intel/iwlwifi/mvm/sta.c start ? "start" : "stopp"); start 2613 drivers/net/wireless/intel/iwlwifi/mvm/sta.c start ? "start" : "stopp", status); start 2620 drivers/net/wireless/intel/iwlwifi/mvm/sta.c if (start) { start 2691 drivers/net/wireless/intel/iwlwifi/mvm/sta.c int tid, u8 queue, bool start) start 2700 drivers/net/wireless/intel/iwlwifi/mvm/sta.c if (start) { start 2730 drivers/net/wireless/intel/iwlwifi/mvm/sta.c start ? 
"start" : "stopp", status); start 529 drivers/net/wireless/intel/iwlwifi/mvm/sta.h int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); start 541 drivers/net/wireless/intel/iwlwifi/mvm/sta.h int tid, u8 queue, bool start); start 760 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int start) start 764 drivers/net/wireless/intel/iwlwifi/pcie/internal.h while (start < fw->num_sec && start 765 drivers/net/wireless/intel/iwlwifi/pcie/internal.h fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && start 766 drivers/net/wireless/intel/iwlwifi/pcie/internal.h fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { start 767 drivers/net/wireless/intel/iwlwifi/pcie/internal.h start++; start 373 drivers/net/wireless/intersil/hostap/hostap_ap.c .start = ap_control_proc_start, start 575 drivers/net/wireless/intersil/hostap/hostap_ap.c .start = prism2_ap_proc_start, start 229 drivers/net/wireless/intersil/hostap/hostap_cs.c hw_priv->link->resource[0]->start & 0x00ff); start 237 drivers/net/wireless/intersil/hostap/hostap_cs.c (hw_priv->link->resource[0]->start >> 8) & 0x00ff); start 532 drivers/net/wireless/intersil/hostap/hostap_cs.c dev->base_addr = link->resource[0]->start; start 218 drivers/net/wireless/intersil/hostap/hostap_download.c .start = prism2_download_aux_dump_proc_start, start 1290 drivers/net/wireless/intersil/hostap/hostap_hw.c unsigned long start, delay; start 1317 drivers/net/wireless/intersil/hostap/hostap_hw.c start = jiffies; start 1335 drivers/net/wireless/intersil/hostap/hostap_hw.c (jiffies - start) * 1000 / HZ); start 118 drivers/net/wireless/intersil/hostap/hostap_proc.c .start = prism2_wds_proc_start, start 171 drivers/net/wireless/intersil/hostap/hostap_proc.c .start = prism2_bss_list_proc_start, start 232 drivers/net/wireless/intersil/hostap/hostap_proc.c static int prism2_io_debug_proc_read(char *page, char **start, off_t off, start 262 drivers/net/wireless/intersil/hostap/hostap_proc.c *start = page; start 348 drivers/net/wireless/intersil/hostap/hostap_proc.c .start = prism2_scan_results_proc_start, start 154 drivers/net/wireless/intersil/orinoco/orinoco_cs.c mem = ioport_map(link->resource[0]->start, start 179 drivers/net/wireless/intersil/orinoco/orinoco_cs.c if (orinoco_if_add(priv, link->resource[0]->start, start 216 drivers/net/wireless/intersil/orinoco/spectrum_cs.c mem = ioport_map(link->resource[0]->start, start 246 drivers/net/wireless/intersil/orinoco/spectrum_cs.c if (orinoco_if_add(priv, link->resource[0]->start, start 707 drivers/net/wireless/intersil/p54/main.c .start = p54_start, start 514 drivers/net/wireless/mac80211_hwsim.c unsigned long next_start, start, end; start 1654 drivers/net/wireless/mac80211_hwsim.c data->survey_data[idx].start = start 1888 drivers/net/wireless/mac80211_hwsim.c hwsim->survey_data[idx].start); start 2069 drivers/net/wireless/mac80211_hwsim.c hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies; start 2338 drivers/net/wireless/mac80211_hwsim.c .start = mac80211_hwsim_start, \ start 834 drivers/net/wireless/marvell/libertas/if_cs.c card->iobase = ioport_map(p_dev->resource[0]->start, start 68 drivers/net/wireless/marvell/libertas_tf/cmd.c for (ch = range->start; ch < range->end; ch++) start 166 drivers/net/wireless/marvell/libertas_tf/libertas_tf.h u8 start; start 477 drivers/net/wireless/marvell/libertas_tf/main.c .start = lbtf_op_start, start 5606 drivers/net/wireless/marvell/mwl8k.c .start = mwl8k_start, start 54 drivers/net/wireless/mediatek/mt76/agg-rx.c int start, idx, nframes; start 61 
drivers/net/wireless/mediatek/mt76/agg-rx.c start = tid->head % tid->size; start 65 drivers/net/wireless/mediatek/mt76/agg-rx.c idx != start && nframes; start 658 drivers/net/wireless/mediatek/mt76/mt7603/main.c .start = mt7603_start, start 494 drivers/net/wireless/mediatek/mt76/mt7615/main.c .start = mt7615_start, start 24 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c u32 start = 0, end = 0, cnt_free; start 33 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c if (!start) start 34 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c start = MT_EE_USAGE_MAP_START + i; start 37 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c cnt_free = end - start + 1; start 75 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c .start = mt76x0e_start, start 121 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c .start = mt76x0u_start, start 158 drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c .start = mt76x2_start, start 105 drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c .start = mt76x2u_start, start 102 drivers/net/wireless/mediatek/mt7601u/debugfs.c seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start, start 103 drivers/net/wireless/mediatek/mt7601u/debugfs.c dev->ee->reg.start + dev->ee->reg.num - 1); start 173 drivers/net/wireless/mediatek/mt7601u/dma.c buf = &q->e[q->start]; start 175 drivers/net/wireless/mediatek/mt7601u/dma.c q->start = (q->start + 1) % q->entries; start 250 drivers/net/wireless/mediatek/mt7601u/dma.c if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) start 253 drivers/net/wireless/mediatek/mt7601u/dma.c skb = q->e[q->start].skb; start 254 drivers/net/wireless/mediatek/mt7601u/dma.c q->e[q->start].skb = NULL; start 263 drivers/net/wireless/mediatek/mt7601u/dma.c q->start = (q->start + 1) % q->entries; start 72 drivers/net/wireless/mediatek/mt7601u/eeprom.c u32 start = 0, end = 0, cnt_free; start 83 drivers/net/wireless/mediatek/mt7601u/eeprom.c if (!start) start 84 drivers/net/wireless/mediatek/mt7601u/eeprom.c start = MT_EE_USAGE_MAP_START + i; start 87 drivers/net/wireless/mediatek/mt7601u/eeprom.c cnt_free = end - start + 1; start 192 drivers/net/wireless/mediatek/mt7601u/eeprom.c val, chan_bounds[idx].start, start 193 drivers/net/wireless/mediatek/mt7601u/eeprom.c chan_bounds[idx].start + chan_bounds[idx].num - 1); start 89 drivers/net/wireless/mediatek/mt7601u/eeprom.h u8 start; start 565 drivers/net/wireless/mediatek/mt7601u/init.c WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > start 569 drivers/net/wireless/mediatek/mt7601u/init.c &mt76_channels_2ghz[dev->ee->reg.start - 1], start 410 drivers/net/wireless/mediatek/mt7601u/main.c .start = mt7601u_start, start 75 drivers/net/wireless/mediatek/mt7601u/mt7601u.h unsigned int start; start 91 drivers/net/wireless/mediatek/mt7601u/mt7601u.h unsigned int start; start 119 drivers/net/wireless/quantenna/qtnfmac/core.c unsigned int start; start 135 drivers/net/wireless/quantenna/qtnfmac/core.c start = u64_stats_fetch_begin_irq(&stats64->syncp); start 140 drivers/net/wireless/quantenna/qtnfmac/core.c } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); start 1706 drivers/net/wireless/ralink/rt2x00/rt2400pci.c .start = rt2x00mac_start, start 2005 drivers/net/wireless/ralink/rt2x00/rt2500pci.c .start = rt2x00mac_start, start 1798 drivers/net/wireless/ralink/rt2x00/rt2500usb.c .start = rt2x00mac_start, start 291 drivers/net/wireless/ralink/rt2x00/rt2800pci.c .start = rt2x00mac_start, start 136 drivers/net/wireless/ralink/rt2x00/rt2800soc.c .start = rt2x00mac_start, start 633 
drivers/net/wireless/ralink/rt2x00/rt2800usb.c .start = rt2x00mac_start, start 787 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c enum queue_index start, start 798 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { start 801 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c start, end); start 812 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index_start = queue->index[start]; start 581 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h enum queue_index start, start 43 drivers/net/wireless/ralink/rt2x00/rt2x00soc.c rt2x00dev->csr.base = ioremap(res->start, resource_size(res)); start 2871 drivers/net/wireless/ralink/rt2x00/rt61pci.c .start = rt2x00mac_start, start 2294 drivers/net/wireless/ralink/rt2x00/rt73usb.c .start = rt2x00mac_start, start 375 drivers/net/wireless/ray_cs.c link->resource[2]->start = 0; start 383 drivers/net/wireless/ray_cs.c local->sram = ioremap(link->resource[2]->start, start 389 drivers/net/wireless/ray_cs.c link->resource[3]->start = 0; start 397 drivers/net/wireless/ray_cs.c local->rmem = ioremap(link->resource[3]->start, start 403 drivers/net/wireless/ray_cs.c link->resource[4]->start = 0; start 411 drivers/net/wireless/ray_cs.c local->amem = ioremap(link->resource[4]->start, start 1602 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c .start = rtl8180_start, start 1380 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c .start = rtl8187_start, start 1399 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start); start 3316 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start) start 3322 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c h2c.bt_wlan_calibration.data = start; start 5899 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c .start = rtl8xxxu_start, start 1877 drivers/net/wireless/realtek/rtlwifi/core.c .start = rtl_op_start, start 214 drivers/net/wireless/realtek/rtlwifi/debug.c int start = debugfs_priv->cb_data; start 220 drivers/net/wireless/realtek/rtlwifi/debug.c int end = (start + 11 > TOTAL_CAM_ENTRY ? 
TOTAL_CAM_ENTRY : start + 11); start 225 drivers/net/wireless/realtek/rtlwifi/debug.c start, end - 1); start 227 drivers/net/wireless/realtek/rtlwifi/debug.c for (j = start; j < end; j++) { start 525 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, start 533 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c if (i >= start && i <= end) { start 411 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, start 419 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c if (i >= start && i <= end) { start 1109 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, start 1117 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c if (i >= start && i <= end) { start 14 drivers/net/wireless/realtek/rtw88/hci.h int (*start)(struct rtw_dev *rtwdev); start 42 drivers/net/wireless/realtek/rtw88/hci.h return rtwdev->hci.ops->start(rtwdev); start 517 drivers/net/wireless/realtek/rtw88/mac80211.c .start = rtw_ops_start, start 323 drivers/net/wireless/realtek/rtw88/main.c static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr) start 328 drivers/net/wireless/realtek/rtw88/main.c rtw_write8(rtwdev, start + i, addr[i]); start 1148 drivers/net/wireless/realtek/rtw88/pci.c .start = rtw_pci_start, start 1976 drivers/net/wireless/rsi/rsi_91x_mac80211.c .start = rsi_mac80211_start, start 206 drivers/net/wireless/st/cw1200/main.c .start = cw1200_start, start 1452 drivers/net/wireless/st/cw1200/sta.c struct wsm_start start = { start 1462 drivers/net/wireless/st/cw1200/sta.c start.band = priv->channel->band == NL80211_BAND_5GHZ ? start 1464 drivers/net/wireless/st/cw1200/sta.c start.channel_number = priv->channel->hw_value; start 1466 drivers/net/wireless/st/cw1200/sta.c start.band = WSM_PHY_BAND_2_4G; start 1467 drivers/net/wireless/st/cw1200/sta.c start.channel_number = 1; start 1470 drivers/net/wireless/st/cw1200/sta.c return wsm_start(priv, &start); start 2311 drivers/net/wireless/st/cw1200/sta.c struct wsm_start start = { start 2331 drivers/net/wireless/st/cw1200/sta.c memset(start.ssid, 0, sizeof(start.ssid)); start 2333 drivers/net/wireless/st/cw1200/sta.c start.ssid_len = conf->ssid_len; start 2334 drivers/net/wireless/st/cw1200/sta.c memcpy(start.ssid, conf->ssid, start.ssid_len); start 2343 drivers/net/wireless/st/cw1200/sta.c start.channel_number, start.band, start 2344 drivers/net/wireless/st/cw1200/sta.c start.beacon_interval, start.dtim_period, start 2345 drivers/net/wireless/st/cw1200/sta.c start.basic_rate_set, start 2346 drivers/net/wireless/st/cw1200/sta.c start.ssid_len, start.ssid); start 2347 drivers/net/wireless/st/cw1200/sta.c ret = wsm_start(priv, &start); start 167 drivers/net/wireless/ti/wl1251/io.c partition[0].start = mem_start; start 169 drivers/net/wireless/ti/wl1251/io.c partition[1].start = reg_start; start 1353 drivers/net/wireless/ti/wl1251/main.c .start = wl1251_op_start, start 54 drivers/net/wireless/ti/wl1251/ps.c unsigned long timeout, start; start 64 drivers/net/wireless/ti/wl1251/ps.c start = jiffies; start 85 drivers/net/wireless/ti/wl1251/ps.c jiffies_to_msecs(jiffies - start)); start 127 drivers/net/wireless/ti/wl1251/wl1251.h u32 start; start 492 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 496 drivers/net/wireless/ti/wl12xx/main.c .start = REGISTERS_BASE, start 500 drivers/net/wireless/ti/wl12xx/main.c .start = 
0x00000000, start 504 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 512 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00040000, start 516 drivers/net/wireless/ti/wl12xx/main.c .start = REGISTERS_BASE, start 520 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 524 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 531 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00040000, start 535 drivers/net/wireless/ti/wl12xx/main.c .start = REGISTERS_BASE, start 539 drivers/net/wireless/ti/wl12xx/main.c .start = 0x003004f8, start 543 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 550 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00040000, start 554 drivers/net/wireless/ti/wl12xx/main.c .start = DRPW_BASE, start 558 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 562 drivers/net/wireless/ti/wl12xx/main.c .start = 0x00000000, start 416 drivers/net/wireless/ti/wl12xx/scan.c struct wl1271_cmd_sched_scan_start *start; start 428 drivers/net/wireless/ti/wl12xx/scan.c start = kzalloc(sizeof(*start), GFP_KERNEL); start 429 drivers/net/wireless/ti/wl12xx/scan.c if (!start) start 432 drivers/net/wireless/ti/wl12xx/scan.c start->role_id = wlvif->role_id; start 433 drivers/net/wireless/ti/wl12xx/scan.c start->tag = WL1271_SCAN_DEFAULT_TAG; start 435 drivers/net/wireless/ti/wl12xx/scan.c ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, start 436 drivers/net/wireless/ti/wl12xx/scan.c sizeof(*start), 0); start 443 drivers/net/wireless/ti/wl12xx/scan.c kfree(start); start 162 drivers/net/wireless/ti/wl18xx/cmd.c int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start) start 168 drivers/net/wireless/ti/wl18xx/cmd.c wlvif->channel, start ? "start" : "stop"); start 181 drivers/net/wireless/ti/wl18xx/cmd.c start ? 
CMD_CAC_START : CMD_CAC_STOP, start 79 drivers/net/wireless/ti/wl18xx/cmd.h int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start); start 582 drivers/net/wireless/ti/wl18xx/main.c .mem = { .start = 0x00A00000, .size = 0x00012000 }, start 583 drivers/net/wireless/ti/wl18xx/main.c .reg = { .start = 0x00807000, .size = 0x00005000 }, start 584 drivers/net/wireless/ti/wl18xx/main.c .mem2 = { .start = 0x00800000, .size = 0x0000B000 }, start 585 drivers/net/wireless/ti/wl18xx/main.c .mem3 = { .start = 0x00401594, .size = 0x00001020 }, start 588 drivers/net/wireless/ti/wl18xx/main.c .mem = { .start = 0x00000000, .size = 0x00014000 }, start 589 drivers/net/wireless/ti/wl18xx/main.c .reg = { .start = 0x00810000, .size = 0x0000BFFF }, start 590 drivers/net/wireless/ti/wl18xx/main.c .mem2 = { .start = 0x00000000, .size = 0x00000000 }, start 591 drivers/net/wireless/ti/wl18xx/main.c .mem3 = { .start = 0x00000000, .size = 0x00000000 }, start 594 drivers/net/wireless/ti/wl18xx/main.c .mem = { .start = 0x00700000, .size = 0x0000030c }, start 595 drivers/net/wireless/ti/wl18xx/main.c .reg = { .start = 0x00802000, .size = 0x00014578 }, start 596 drivers/net/wireless/ti/wl18xx/main.c .mem2 = { .start = 0x00B00404, .size = 0x00001000 }, start 597 drivers/net/wireless/ti/wl18xx/main.c .mem3 = { .start = 0x00C00000, .size = 0x00000400 }, start 600 drivers/net/wireless/ti/wl18xx/main.c .mem = { .start = 0x00800000, .size = 0x000050FC }, start 601 drivers/net/wireless/ti/wl18xx/main.c .reg = { .start = 0x00B00404, .size = 0x00001000 }, start 602 drivers/net/wireless/ti/wl18xx/main.c .mem2 = { .start = 0x00C00000, .size = 0x00000400 }, start 603 drivers/net/wireless/ti/wl18xx/main.c .mem3 = { .start = 0x00401594, .size = 0x00001020 }, start 606 drivers/net/wireless/ti/wl18xx/main.c .mem = { .start = WL18XX_PHY_INIT_MEM_ADDR, start 608 drivers/net/wireless/ti/wl18xx/main.c .reg = { .start = 0x00000000, .size = 0x00000000 }, start 609 drivers/net/wireless/ti/wl18xx/main.c .mem2 = { .start = 0x00000000, .size = 0x00000000 }, start 610 drivers/net/wireless/ti/wl18xx/main.c .mem3 = { .start = 0x00000000, .size = 0x00000000 }, start 187 drivers/net/wireless/ti/wlcore/boot.c partition.mem.start = dest; start 203 drivers/net/wireless/ti/wlcore/boot.c partition.mem.start = addr; start 365 drivers/net/wireless/ti/wlcore/boot.c dest_addr += wl->curr_part.reg.start; start 1080 drivers/net/wireless/ti/wlcore/debugfs.c part.mem.start = *ppos; start 1162 drivers/net/wireless/ti/wlcore/debugfs.c part.mem.start = *ppos; start 302 drivers/net/wireless/ti/wlcore/hw_ops.h wlcore_hw_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start) start 307 drivers/net/wireless/ti/wlcore/hw_ops.h return wl->ops->set_cac(wl, wlvif, start); start 69 drivers/net/wireless/ti/wlcore/io.c if ((addr >= part->mem.start) && start 70 drivers/net/wireless/ti/wlcore/io.c (addr < part->mem.start + part->mem.size)) start 71 drivers/net/wireless/ti/wlcore/io.c return addr - part->mem.start; start 72 drivers/net/wireless/ti/wlcore/io.c else if ((addr >= part->reg.start) && start 73 drivers/net/wireless/ti/wlcore/io.c (addr < part->reg.start + part->reg.size)) start 74 drivers/net/wireless/ti/wlcore/io.c return addr - part->reg.start + part->mem.size; start 75 drivers/net/wireless/ti/wlcore/io.c else if ((addr >= part->mem2.start) && start 76 drivers/net/wireless/ti/wlcore/io.c (addr < part->mem2.start + part->mem2.size)) start 77 drivers/net/wireless/ti/wlcore/io.c return addr - part->mem2.start + part->mem.size + start 79 
drivers/net/wireless/ti/wlcore/io.c else if ((addr >= part->mem3.start) && start 80 drivers/net/wireless/ti/wlcore/io.c (addr < part->mem3.start + part->mem3.size)) start 81 drivers/net/wireless/ti/wlcore/io.c return addr - part->mem3.start + part->mem.size + start 132 drivers/net/wireless/ti/wlcore/io.c p->mem.start, p->mem.size); start 134 drivers/net/wireless/ti/wlcore/io.c p->reg.start, p->reg.size); start 136 drivers/net/wireless/ti/wlcore/io.c p->mem2.start, p->mem2.size); start 138 drivers/net/wireless/ti/wlcore/io.c p->mem3.start, p->mem3.size); start 140 drivers/net/wireless/ti/wlcore/io.c ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); start 148 drivers/net/wireless/ti/wlcore/io.c ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); start 156 drivers/net/wireless/ti/wlcore/io.c ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); start 175 drivers/net/wireless/ti/wlcore/io.c ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); start 5997 drivers/net/wireless/ti/wlcore/main.c .start = wl1271_op_start, start 6592 drivers/net/wireless/ti/wlcore/main.c wl->irq = res->start; start 6632 drivers/net/wireless/ti/wlcore/main.c wl->wakeirq = res->start; start 132 drivers/net/wireless/ti/wlcore/scan.c int start, int max_channels, start 177 drivers/net/wireless/ti/wlcore/scan.c for (i = 0, j = start; start 242 drivers/net/wireless/ti/wlcore/scan.c return j - start; start 336 drivers/net/wireless/ti/wlcore/sdio.c res[0].start = irq; start 343 drivers/net/wireless/ti/wlcore/sdio.c res[1].start = wakeirq; start 523 drivers/net/wireless/ti/wlcore/spi.c res[0].start = spi->irq; start 116 drivers/net/wireless/ti/wlcore/wlcore.h bool start); start 133 drivers/net/wireless/ti/wlcore/wlcore.h u32 start; start 1907 drivers/net/wireless/wl3501_cs.c link->resource[0]->start = j; start 1908 drivers/net/wireless/wl3501_cs.c link->resource[1]->start = link->resource[0]->start + 0x10; start 1928 drivers/net/wireless/wl3501_cs.c dev->base_addr = link->resource[0]->start; start 1347 drivers/net/wireless/zydas/zd1211rw/zd_mac.c .start = zd_op_start, start 370 drivers/net/xen-netback/netback.c int start; start 377 drivers/net/xen-netback/netback.c start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); start 379 drivers/net/xen-netback/netback.c for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; start 1105 drivers/net/xen-netfront.c unsigned int start; start 1108 drivers/net/xen-netfront.c start = u64_stats_fetch_begin_irq(&tx_stats->syncp); start 1111 drivers/net/xen-netfront.c } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); start 1114 drivers/net/xen-netfront.c start = u64_stats_fetch_begin_irq(&rx_stats->syncp); start 1117 drivers/net/xen-netfront.c } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); start 30 drivers/nvdimm/badrange.c bre->start = addr; start 69 drivers/nvdimm/badrange.c if (bre->start == addr) { start 101 drivers/nvdimm/badrange.c void badrange_forget(struct badrange *badrange, phys_addr_t start, start 105 drivers/nvdimm/badrange.c u64 clr_end = start + len - 1; start 119 drivers/nvdimm/badrange.c u64 bre_end = bre->start + bre->length - 1; start 122 drivers/nvdimm/badrange.c if (bre_end < start) start 124 drivers/nvdimm/badrange.c if (bre->start > clr_end) start 127 drivers/nvdimm/badrange.c if ((bre->start >= start) && (bre_end <= clr_end)) { start 133 drivers/nvdimm/badrange.c if ((start <= bre->start) && (clr_end > bre->start)) { start 134 drivers/nvdimm/badrange.c bre->length -= clr_end - 
bre->start + 1; start 135 drivers/nvdimm/badrange.c bre->start = clr_end + 1; start 139 drivers/nvdimm/badrange.c if ((bre->start < start) && (bre_end <= clr_end)) { start 141 drivers/nvdimm/badrange.c bre->length = start - bre->start; start 149 drivers/nvdimm/badrange.c if ((bre->start < start) && (bre_end > clr_end)) { start 157 drivers/nvdimm/badrange.c bre->length = start - bre->start; start 222 drivers/nvdimm/badrange.c u64 bre_end = bre->start + bre->length - 1; start 225 drivers/nvdimm/badrange.c if (bre_end < res->start) start 227 drivers/nvdimm/badrange.c if (bre->start > res->end) start 230 drivers/nvdimm/badrange.c if (bre->start >= res->start) { start 231 drivers/nvdimm/badrange.c u64 start = bre->start; start 237 drivers/nvdimm/badrange.c len = res->start + resource_size(res) start 238 drivers/nvdimm/badrange.c - bre->start; start 239 drivers/nvdimm/badrange.c __add_badblock_range(bb, start - res->start, len); start 246 drivers/nvdimm/badrange.c if (bre->start < res->start) { start 250 drivers/nvdimm/badrange.c len = bre->start + bre->length - res->start; start 43 drivers/nvdimm/blk.c return nsblk->res[i]->start + ns_offset; start 170 drivers/nvdimm/blk.c unsigned long start; start 181 drivers/nvdimm/blk.c do_acct = nd_iostat_start(bio, &start); start 198 drivers/nvdimm/blk.c nd_iostat_end(bio, start); start 746 drivers/nvdimm/btt.c size_t start, size_t arena_off) start 763 drivers/nvdimm/btt.c arena->external_lba_start = start; start 1447 drivers/nvdimm/btt.c unsigned long start; start 1455 drivers/nvdimm/btt.c do_acct = nd_iostat_start(bio, &start); start 1480 drivers/nvdimm/btt.c nd_iostat_end(bio, start); start 912 drivers/nvdimm/bus.c resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend; start 917 drivers/nvdimm/bus.c start = clear_err->address; start 934 drivers/nvdimm/bus.c pstart = nsio->res.start + offset; start 937 drivers/nvdimm/bus.c if ((pstart >= start) && (pend <= end)) start 283 drivers/nvdimm/claim.c nsio->res.start + offset, size); start 309 drivers/nvdimm/claim.c if (!devm_request_mem_region(dev, res->start, resource_size(res), start 321 drivers/nvdimm/claim.c nsio->addr = devm_memremap(dev, res->start, resource_size(res), start 334 drivers/nvdimm/claim.c devm_release_mem_region(dev, res->start, resource_size(res)); start 50 drivers/nvdimm/dimm.c ndd->dpa.start = 0; start 583 drivers/nvdimm/dimm_devs.c map_end = nd_mapping->start + nd_mapping->size - 1; start 584 drivers/nvdimm/dimm_devs.c blk_start = nd_mapping->start; start 592 drivers/nvdimm/dimm_devs.c if (info->res->start >= nd_mapping->start start 593 drivers/nvdimm/dimm_devs.c && info->res->start < map_end) start 607 drivers/nvdimm/dimm_devs.c if ((res->start >= blk_start && res->start < map_end) start 619 drivers/nvdimm/dimm_devs.c if (info->res && blk_start > info->res->start) { start 620 drivers/nvdimm/dimm_devs.c info->res->start = max(info->res->start, blk_start); start 621 drivers/nvdimm/dimm_devs.c if (info->res->start > info->res->end) start 622 drivers/nvdimm/dimm_devs.c info->res->end = info->res->start - 1; start 626 drivers/nvdimm/dimm_devs.c info->available -= blk_start - nd_mapping->start; start 723 drivers/nvdimm/dimm_devs.c map_start = nd_mapping->start; start 727 drivers/nvdimm/dimm_devs.c if (res->start >= map_start && res->start < map_end) { start 730 drivers/nvdimm/dimm_devs.c max(map_start, res->start)); start 746 drivers/nvdimm/dimm_devs.c } else if (map_start > res->start && map_start < res->end) { start 768 drivers/nvdimm/dimm_devs.c __release_region(&ndd->dpa, 
res->start, resource_size(res)); start 772 drivers/nvdimm/dimm_devs.c struct nd_label_id *label_id, resource_size_t start, start 782 drivers/nvdimm/dimm_devs.c res = __request_region(&ndd->dpa, start, n, name, 0); start 50 drivers/nvdimm/e820.c ndr_desc.numa_node = e820_range_to_nid(res->start); start 808 drivers/nvdimm/label.c nd_label->dpa = __cpu_to_le64(res->start); start 882 drivers/nvdimm/label.c if (res->start != __le64_to_cpu(nd_label->dpa)) start 977 drivers/nvdimm/label.c if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) { start 991 drivers/nvdimm/label.c if (res->start < min->start) start 1034 drivers/nvdimm/label.c nd_label->dpa = __cpu_to_le64(res->start); start 149 drivers/nvdimm/namespace_devs.c if (region_intersects(nsio->res.start, resource_size(&nsio->res), start 479 drivers/nvdimm/namespace_devs.c new_start = res->start + n; start 481 drivers/nvdimm/namespace_devs.c new_start = res->start; start 533 drivers/nvdimm/namespace_devs.c first_dpa = nd_mapping->start + nd_mapping->size - n; start 535 drivers/nvdimm/namespace_devs.c first_dpa = nd_mapping->start; start 571 drivers/nvdimm/namespace_devs.c if (valid->start >= valid->end) start 601 drivers/nvdimm/namespace_devs.c if (valid->start == exist->end + 1 start 602 drivers/nvdimm/namespace_devs.c || valid->end == exist->start - 1) start 607 drivers/nvdimm/namespace_devs.c valid->end = valid->start - 1; start 618 drivers/nvdimm/namespace_devs.c resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; start 629 drivers/nvdimm/namespace_devs.c valid.start = nd_mapping->start; start 642 drivers/nvdimm/namespace_devs.c if (res->start > mapping_end) start 644 drivers/nvdimm/namespace_devs.c if (res->end < nd_mapping->start) start 648 drivers/nvdimm/namespace_devs.c if (!first++ && res->start > nd_mapping->start) { start 649 drivers/nvdimm/namespace_devs.c valid.start = nd_mapping->start; start 650 drivers/nvdimm/namespace_devs.c valid.end = res->start - 1; start 660 drivers/nvdimm/namespace_devs.c valid.start = res->start + resource_size(res); start 661 drivers/nvdimm/namespace_devs.c valid.end = min(mapping_end, next->start - 1); start 671 drivers/nvdimm/namespace_devs.c valid.start = res->start + resource_size(res); start 687 drivers/nvdimm/namespace_devs.c rc = adjust_resource(res, res->start - allocate, start 696 drivers/nvdimm/namespace_devs.c rc = adjust_resource(next, next->start start 719 drivers/nvdimm/namespace_devs.c valid.start += available - allocate; start 722 drivers/nvdimm/namespace_devs.c valid.start, allocate); start 727 drivers/nvdimm/namespace_devs.c rc = adjust_resource(res, res->start, resource_size(res) start 778 drivers/nvdimm/namespace_devs.c resource_size_t end = res->start + resource_size(res); start 782 drivers/nvdimm/namespace_devs.c || end != next->start) start 786 drivers/nvdimm/namespace_devs.c rc = adjust_resource(res, res->start, end - res->start); start 947 drivers/nvdimm/namespace_devs.c offset = (res->start - nd_mapping->start) start 957 drivers/nvdimm/namespace_devs.c res->start = nd_region->ndr_start + offset; start 958 drivers/nvdimm/namespace_devs.c res->end = res->start + size - 1; start 1330 drivers/nvdimm/namespace_devs.c return sprintf(buf, "%#llx\n", (unsigned long long) res->start); start 1784 drivers/nvdimm/namespace_devs.c res->start = nd_region->ndr_start; start 1785 drivers/nvdimm/namespace_devs.c res->end = res->start + nd_region->ndr_size - 1; start 1881 drivers/nvdimm/namespace_devs.c hw_start = nd_mapping->start; start 2039 
drivers/nvdimm/namespace_devs.c resource_size_t start) start 2053 drivers/nvdimm/namespace_devs.c && res->start == start) { start 2290 drivers/nvdimm/namespace_devs.c return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start, start 2297 drivers/nvdimm/namespace_devs.c return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start, start 2307 drivers/nvdimm/namespace_devs.c resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; start 2325 drivers/nvdimm/namespace_devs.c if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start || start 151 drivers/nvdimm/nd-core.h int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, start 158 drivers/nvdimm/nd-core.h resource_size_t start); start 94 drivers/nvdimm/nd.h (unsigned long long) (res ? res->start : 0), ##arg) start 124 drivers/nvdimm/nd.h u64 start; start 361 drivers/nvdimm/nd.h struct nd_label_id *label_id, resource_size_t start, start 399 drivers/nvdimm/nd.h void __nd_iostat_start(struct bio *bio, unsigned long *start); start 400 drivers/nvdimm/nd.h static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) start 407 drivers/nvdimm/nd.h *start = jiffies; start 412 drivers/nvdimm/nd.h static inline void nd_iostat_end(struct bio *bio, unsigned long start) start 416 drivers/nvdimm/nd.h generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start); start 222 drivers/nvdimm/pfn_devs.c rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start start 395 drivers/nvdimm/pfn_devs.c + (first_bad << 9)) - nsio->res.start, start 586 drivers/nvdimm/pfn_devs.c if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) start 674 drivers/nvdimm/pfn_devs.c resource_size_t base = nsio->res.start + start_pad; start 683 drivers/nvdimm/pfn_devs.c res->start += start_pad; start 711 drivers/nvdimm/pfn_devs.c resource_size_t start, size; start 752 drivers/nvdimm/pfn_devs.c start = nsio->res.start; start 756 drivers/nvdimm/pfn_devs.c end_trunc = start + size - ALIGN_DOWN(start + size, align); start 770 drivers/nvdimm/pfn_devs.c offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align) start 771 drivers/nvdimm/pfn_devs.c - start; start 773 drivers/nvdimm/pfn_devs.c offset = ALIGN(start + SZ_8K, align) - start; start 190 drivers/nvdimm/pmem.c unsigned long start; start 199 drivers/nvdimm/pmem.c do_acct = nd_iostat_start(bio, &start); start 209 drivers/nvdimm/pmem.c nd_iostat_end(bio, start); start 387 drivers/nvdimm/pmem.c pmem->phys_addr = res->start; start 395 drivers/nvdimm/pmem.c if (!devm_request_mem_region(dev, res->start, resource_size(res), start 417 drivers/nvdimm/pmem.c bb_res.start += pmem->data_offset; start 605 drivers/nvdimm/pmem.c res.start = nsio->res.start + offset; start 47 drivers/nvdimm/region.c ndr_res.start = nd_region->ndr_start; start 127 drivers/nvdimm/region.c res.start = nd_region->ndr_start; start 34 drivers/nvdimm/region_devs.c unsigned long pfn = PHYS_PFN(res->start); start 40 drivers/nvdimm/region_devs.c unsigned long pfn_j = PHYS_PFN(res_j->start); start 56 drivers/nvdimm/region_devs.c + (res->start & ~PAGE_MASK)); start 763 drivers/nvdimm/region_devs.c nd_mapping->start, nd_mapping->size, start 947 drivers/nvdimm/region_devs.c if ((mapping->start | mapping->size) % PAGE_SIZE) { start 1009 drivers/nvdimm/region_devs.c nd_region->mapping[i].start = mapping->start; start 1036 drivers/nvdimm/region_devs.c nd_region->ndr_start = ndr_desc->res->start; start 1181 drivers/nvdimm/region_devs.c resource_size_t start, size; start 1197 drivers/nvdimm/region_devs.c 
res_end = ctx->start + ctx->size; start 1200 drivers/nvdimm/region_devs.c if (ctx->start >= region_start && ctx->start < region_end) start 1207 drivers/nvdimm/region_devs.c int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, start 1213 drivers/nvdimm/region_devs.c .start = start, start 62 drivers/nvdimm/virtio_pmem.c start, &vpmem->start); start 66 drivers/nvdimm/virtio_pmem.c res.start = vpmem->start; start 67 drivers/nvdimm/virtio_pmem.c res.end = vpmem->start + vpmem->size - 1; start 49 drivers/nvdimm/virtio_pmem.h __u64 start; start 977 drivers/nvme/host/pci.c static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end) start 979 drivers/nvme/host/pci.c while (start != end) { start 980 drivers/nvme/host/pci.c nvme_handle_cqe(nvmeq, start); start 981 drivers/nvme/host/pci.c if (++start == nvmeq->q_depth) start 982 drivers/nvme/host/pci.c start = 0; start 996 drivers/nvme/host/pci.c static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start, start 1001 drivers/nvme/host/pci.c *start = nvmeq->cq_head; start 1009 drivers/nvme/host/pci.c if (*start != *end) start 1018 drivers/nvme/host/pci.c u16 start, end; start 1027 drivers/nvme/host/pci.c nvme_process_cq(nvmeq, &start, &end, -1); start 1031 drivers/nvme/host/pci.c if (start != end) { start 1032 drivers/nvme/host/pci.c nvme_complete_cqes(nvmeq, start, end); start 1054 drivers/nvme/host/pci.c u16 start, end; start 1064 drivers/nvme/host/pci.c found = nvme_process_cq(nvmeq, &start, &end, tag); start 1068 drivers/nvme/host/pci.c found = nvme_process_cq(nvmeq, &start, &end, tag); start 1072 drivers/nvme/host/pci.c nvme_complete_cqes(nvmeq, start, end); start 1079 drivers/nvme/host/pci.c u16 start, end; start 1086 drivers/nvme/host/pci.c found = nvme_process_cq(nvmeq, &start, &end, -1); start 1087 drivers/nvme/host/pci.c nvme_complete_cqes(nvmeq, start, end); start 1418 drivers/nvme/host/pci.c u16 start, end; start 1422 drivers/nvme/host/pci.c nvme_process_cq(&dev->queues[i], &start, &end, -1); start 1423 drivers/nvme/host/pci.c nvme_complete_cqes(&dev->queues[i], start, end); start 347 drivers/of/address.c res->start = port; start 355 drivers/of/address.c res->start = range->cpu_addr; start 357 drivers/of/address.c res->end = res->start + range->size - 1; start 361 drivers/of/address.c res->start = (resource_size_t)OF_BAD_ADDR; start 794 drivers/of/address.c r->start = taddr; start 838 drivers/of/address.c res.start == base_address) start 862 drivers/of/address.c return ioremap(res.start, resource_size(&res)); start 891 drivers/of/address.c if (!request_mem_region(res.start, resource_size(&res), name)) start 894 drivers/of/address.c mem = ioremap(res.start, resource_size(&res)); start 896 drivers/of/address.c release_mem_region(res.start, resource_size(&res)); start 2020 drivers/of/base.c const char *start = pp->name; start 2021 drivers/of/base.c const char *end = start + strlen(start); start 2038 drivers/of/base.c while (isdigit(*(end-1)) && end > start) start 2040 drivers/of/base.c len = end - start; start 2050 drivers/of/base.c ap->alias = start; start 2051 drivers/of/base.c of_alias_add(ap, np, id, start, len); start 856 drivers/of/fdt.c static void __early_init_dt_declare_initrd(unsigned long start, start 865 drivers/of/fdt.c initrd_start = (unsigned long)__va(start); start 877 drivers/of/fdt.c u64 start, end; start 886 drivers/of/fdt.c start = of_read_number(prop, len/4); start 893 drivers/of/fdt.c __early_init_dt_declare_initrd(start, end); start 894 drivers/of/fdt.c phys_initrd_start 
= start; start 895 drivers/of/fdt.c phys_initrd_size = end - start; start 898 drivers/of/fdt.c (unsigned long long)start, (unsigned long long)end); start 369 drivers/of/irq.c r->start = r->end = irq; start 65 drivers/of/of_numa.c r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1); start 30 drivers/of/of_reserved_mem.c phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, start 37 drivers/of/of_reserved_mem.c base = memblock_find_in_range(start, end, size, align); start 78 drivers/of/of_reserved_mem.c phys_addr_t start = 0, end = 0; start 130 drivers/of/of_reserved_mem.c start = dt_mem_next_cell(dt_root_addr_cells, &prop); start 131 drivers/of/of_reserved_mem.c end = start + dt_mem_next_cell(dt_root_size_cells, start 135 drivers/of/of_reserved_mem.c align, start, end, nomap, &base); start 312 drivers/of/platform.c if (res.start != auxdata->phys_addr) start 1004 drivers/of/unittest.c .start = 0xfffffff8, start 93 drivers/opp/ti-opp-supply.c base = ioremap_nocache(res->start, resource_size(res)); start 574 drivers/oprofile/buffer_sync.c void oprofile_put_buff(unsigned long *buf, unsigned int start, start 579 drivers/oprofile/buffer_sync.c i = start; start 148 drivers/oprofile/nmi_timer_int.c ops->start = nmi_timer_start; start 173 drivers/oprofile/oprof.c if ((err = oprofile_ops.start())) start 313 drivers/oprofile/oprofile_perf.c ops->start = oprofile_perf_start; start 117 drivers/oprofile/timer_int.c ops->start = oprofile_hrtimer_start; start 74 drivers/parisc/asp.c asp.version = gsc_readb(dev->hpa.start + ASP_VER_OFFSET) & 0xf; start 79 drivers/parisc/asp.c asp.name, asp.version, (unsigned long)dev->hpa.start); start 1359 drivers/parisc/ccio-dma.c res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16); start 1365 drivers/parisc/ccio-dma.c if (res->end + 1 == res->start) start 1376 drivers/parisc/ccio-dma.c __func__, (unsigned long)res->start, (unsigned long)res->end); start 1397 drivers/parisc/ccio-dma.c res->start = (max - size + 1) &~ (align - 1); start 1398 drivers/parisc/ccio-dma.c res->end = res->start + size; start 1413 drivers/parisc/ccio-dma.c unsigned long start, len; start 1418 drivers/parisc/ccio-dma.c start = (res->start - size) &~ (align - 1); start 1419 drivers/parisc/ccio-dma.c len = res->end - start + 1; start 1420 drivers/parisc/ccio-dma.c if (start >= min) { start 1421 drivers/parisc/ccio-dma.c if (!adjust_resource(res, start, len)) start 1425 drivers/parisc/ccio-dma.c start = res->start; start 1426 drivers/parisc/ccio-dma.c len = ((size + res->end + align) &~ (align - 1)) - start; start 1427 drivers/parisc/ccio-dma.c if (start + len <= max) { start 1428 drivers/parisc/ccio-dma.c if (!adjust_resource(res, start, len)) start 1462 drivers/parisc/ccio-dma.c __raw_writel(((parent->start)>>16) | 0xffff0000, start 1468 drivers/parisc/ccio-dma.c __raw_writel(((parent->start)>>16) | 0xffff0000, start 1488 drivers/parisc/ccio-dma.c } else if ((ioc->mmio_region->start <= res->start) && start 1491 drivers/parisc/ccio-dma.c } else if (((ioc->mmio_region + 1)->start <= res->start) && start 1529 drivers/parisc/ccio-dma.c (unsigned long)dev->hpa.start); start 1537 drivers/parisc/ccio-dma.c ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096); start 185 drivers/parisc/dino.c u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start; start 220 drivers/parisc/dino.c u32 local_bus = (bus->parent == NULL) ? 
0 : bus->busn_res.start; start 518 drivers/parisc/dino.c if (res->start == F_EXTEND(0xf0000000UL | (i * _8MB))) start 522 drivers/parisc/dino.c i, res->start, base_addr + DINO_IO_ADDR_EN); start 576 drivers/parisc/dino.c __func__, bus, bus->busn_res.start, start 600 drivers/parisc/dino.c bus->self->resource[i].end = bus->self->resource[i].end - bus->self->resource[i].start + DINO_BRIDGE_ALIGN; start 601 drivers/parisc/dino.c bus->self->resource[i].start = DINO_BRIDGE_ALIGN; start 757 drivers/parisc/dino.c unsigned long start, end; start 762 drivers/parisc/dino.c start = F_EXTEND(0xf0000000UL) | (i << 23); start 763 drivers/parisc/dino.c end = start + 8 * 1024 * 1024 - 1; start 766 drivers/parisc/dino.c start, end); start 768 drivers/parisc/dino.c if(prevres && prevres->end + 1 == start) { start 772 drivers/parisc/dino.c printk(KERN_ERR "%s is out of resource windows for range %d (0x%lx-0x%lx)\n", name, count, start, end); start 776 drivers/parisc/dino.c res->start = start; start 871 drivers/parisc/dino.c res->start = HBA_PORT_BASE(dino_dev->hba.hba_num); start 872 drivers/parisc/dino.c res->end = res->start + (HBA_PORT_SPACE_SIZE - 1); start 877 drivers/parisc/dino.c name, (unsigned long)res->start, (unsigned long)res->end, start 917 drivers/parisc/dino.c unsigned long hpa = dev->hpa.start; start 1004 drivers/parisc/dino.c dino_dev->hba.bus_num.start = dino_current_bus; start 306 drivers/parisc/eisa.c name, (unsigned long)dev->hpa.start); start 312 drivers/parisc/eisa.c eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000); start 321 drivers/parisc/eisa.c eisa_dev.hba.io_space.start = 0; start 93 drivers/parisc/eisa_enumerator.c res->start = mem_parent->start + get_24(buf+len+2); start 94 drivers/parisc/eisa_enumerator.c res->end = res->start + get_16(buf+len+5)*1024; start 183 drivers/parisc/eisa_enumerator.c res->start = get_16(buf+len+1); start 65 drivers/parisc/hppb.c card->hpa = dev->hpa.start; start 69 drivers/parisc/hppb.c card->mmio_region.start = gsc_readl(dev->hpa.start + IO_IO_LOW); start 70 drivers/parisc/hppb.c card->mmio_region.end = gsc_readl(dev->hpa.start + IO_IO_HIGH) - 1; start 75 drivers/parisc/hppb.c &dev->hpa.start, start 521 drivers/parisc/iosapic.c pcidev->bus->busn_res.start, intr_slot, intr_pin); start 174 drivers/parisc/lasi.c lasi->hpa = dev->hpa.start; start 169 drivers/parisc/lba_pci.c (long)r->start, (long)r->end, r->flags); start 191 drivers/parisc/lba_pci.c u8 first_bus = d->hba.hba_bus->busn_res.start; start 366 drivers/parisc/lba_pci.c u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start; start 382 drivers/parisc/lba_pci.c if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) { start 433 drivers/parisc/lba_pci.c u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start; start 446 drivers/parisc/lba_pci.c if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) { start 483 drivers/parisc/lba_pci.c u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start; start 516 drivers/parisc/lba_pci.c u32 local_bus = (bus->parent == NULL) ? 
0 : bus->busn_res.start; start 573 drivers/parisc/lba_pci.c unsigned long start = new->start; start 577 drivers/parisc/lba_pci.c if (end <= start || start < root->start || !tmp) start 581 drivers/parisc/lba_pci.c while (tmp && tmp->end < start) start 590 drivers/parisc/lba_pci.c if (tmp->start >= end) return 0; start 592 drivers/parisc/lba_pci.c if (tmp->start <= start) { start 594 drivers/parisc/lba_pci.c new->start = tmp->end + 1; start 604 drivers/parisc/lba_pci.c new->end = tmp->start - 1; start 609 drivers/parisc/lba_pci.c start, end, start 610 drivers/parisc/lba_pci.c (long)new->start, (long)new->end ); start 622 drivers/parisc/lba_pci.c extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len) start 631 drivers/parisc/lba_pci.c end - start, lba_len); start 635 drivers/parisc/lba_pci.c pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end); start 639 drivers/parisc/lba_pci.c if (end < start) /* fix overflow */ start 642 drivers/parisc/lba_pci.c pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end); start 647 drivers/parisc/lba_pci.c if (tmp->start == start) start 649 drivers/parisc/lba_pci.c if (tmp->end < start) start 651 drivers/parisc/lba_pci.c if (tmp->start > end) start 653 drivers/parisc/lba_pci.c if (end >= tmp->start) start 654 drivers/parisc/lba_pci.c end = tmp->start - 1; start 657 drivers/parisc/lba_pci.c pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end); start 678 drivers/parisc/lba_pci.c if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) { start 685 drivers/parisc/lba_pci.c r->start = r->end = 0; start 722 drivers/parisc/lba_pci.c bus, (int)bus->busn_res.start, bus->bridge->platform_data); start 740 drivers/parisc/lba_pci.c ldev->hba.io_space.start, ldev->hba.io_space.end, start 744 drivers/parisc/lba_pci.c ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end, start 760 drivers/parisc/lba_pci.c (long)ldev->hba.elmmio_space.start, start 773 drivers/parisc/lba_pci.c (long)ldev->hba.lmmio_space.start, start 785 drivers/parisc/lba_pci.c (long)ldev->hba.gmmio_space.start, start 805 drivers/parisc/lba_pci.c if (!res->start) start 1062 drivers/parisc/lba_pci.c unsigned long start; start 1073 drivers/parisc/lba_pci.c lba_dev->hba.bus_num.start = p->start; start 1085 drivers/parisc/lba_pci.c if ((p->end - p->start) != lba_len) start 1086 drivers/parisc/lba_pci.c p->end = extend_lmmio_len(p->start, start 1091 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1092 drivers/parisc/lba_pci.c lba_dev->hba.lmmio_space_offset = p->start - start 1093 drivers/parisc/lba_pci.c io->start; start 1099 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1108 drivers/parisc/lba_pci.c r->start = p->start; start 1117 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1120 drivers/parisc/lba_pci.c r->start = p->start; start 1129 drivers/parisc/lba_pci.c i, p->start); start 1137 drivers/parisc/lba_pci.c lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024); start 1140 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1143 drivers/parisc/lba_pci.c r->start = HBA_PORT_BASE(lba_dev->hba.hba_num); start 1144 drivers/parisc/lba_pci.c r->end = r->start + HBA_PORT_SPACE_SIZE - 1; start 1189 drivers/parisc/lba_pci.c r->start = lba_num & 0xff; start 1198 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1268 drivers/parisc/lba_pci.c r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE); start 1269 drivers/parisc/lba_pci.c if (r->start & 1) { start 
1274 drivers/parisc/lba_pci.c r->start &= mmio_mask; start 1275 drivers/parisc/lba_pci.c r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start); start 1283 drivers/parisc/lba_pci.c r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start); start 1284 drivers/parisc/lba_pci.c r->end = r->start + rsize; start 1286 drivers/parisc/lba_pci.c r->end = r->start = 0; /* Not enabled. */ start 1307 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1314 drivers/parisc/lba_pci.c r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE); start 1316 drivers/parisc/lba_pci.c if (r->start & 1) { start 1320 drivers/parisc/lba_pci.c r->start &= mmio_mask; start 1321 drivers/parisc/lba_pci.c r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start); start 1323 drivers/parisc/lba_pci.c r->end = r->start + ~rsize; start 1329 drivers/parisc/lba_pci.c (int)lba_dev->hba.bus_num.start); start 1332 drivers/parisc/lba_pci.c r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L; start 1333 drivers/parisc/lba_pci.c r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1)); start 1337 drivers/parisc/lba_pci.c r->start |= lba_num; start 1479 drivers/parisc/lba_pci.c void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096); start 1498 drivers/parisc/lba_pci.c version, func_class & 0xf, (long)dev->hpa.start); start 1528 drivers/parisc/lba_pci.c minor, func_class, (long)dev->hpa.start); start 1533 drivers/parisc/lba_pci.c (long)dev->hpa.start); start 1538 drivers/parisc/lba_pci.c tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE); start 1586 drivers/parisc/lba_pci.c if (lba_dev->hba.bus_num.start < lba_next_bus) start 1587 drivers/parisc/lba_pci.c lba_dev->hba.bus_num.start = lba_next_bus; start 1601 drivers/parisc/lba_pci.c (long)lba_dev->hba.lmmio_space.start, start 1624 drivers/parisc/lba_pci.c pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start, start 1696 drivers/parisc/lba_pci.c void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096); start 1227 drivers/parisc/sba_iommu.c int rope_num = (lba->hpa.start >> 13) & 0xf; start 1516 drivers/parisc/sba_iommu.c return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); start 1592 drivers/parisc/sba_iommu.c sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL; start 1604 drivers/parisc/sba_iommu.c sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL; start 1610 drivers/parisc/sba_iommu.c sba_dev->iommu_resv.start = 0x40000000UL; start 1886 drivers/parisc/sba_iommu.c void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); start 1931 drivers/parisc/sba_iommu.c MODULE_NAME, version, (unsigned long long)dev->hpa.start); start 2022 drivers/parisc/sba_iommu.c r->start = r->end = 0; start 2038 drivers/parisc/sba_iommu.c r->start = (base & ~1UL) | PCI_F_EXTEND; start 2040 drivers/parisc/sba_iommu.c r->end = r->start + size; start 2065 drivers/parisc/sba_iommu.c r->start = r->end = 0; start 2073 drivers/parisc/sba_iommu.c r->start = (base & ~1UL) | PCI_F_EXTEND; start 2076 drivers/parisc/sba_iommu.c r->start += rope * (size + 1); /* adjust base for this rope */ start 2077 drivers/parisc/sba_iommu.c r->end = r->start + size; start 79 drivers/parisc/wax.c wax->hpa = dev->hpa.start; start 293 drivers/parport/parport_ax88796.c dd->io = request_mem_region(res->start, size, pdev->name); start 300 drivers/parport/parport_ax88796.c dd->base = ioremap(res->start, size); start 355 drivers/parport/parport_ax88796.c release_mem_region(dd->io->start, size); start 369 
drivers/parport/parport_ax88796.c release_mem_region(dd->io->start, resource_size(dd->io)); start 140 drivers/parport/parport_cs.c p = parport_pc_probe_port(link->resource[0]->start, start 141 drivers/parport/parport_cs.c link->resource[1]->start, start 147 drivers/parport/parport_cs.c (unsigned int) link->resource[0]->start, start 351 drivers/parport/parport_gsc.c (unsigned long long)dev->hpa.start); start 355 drivers/parport/parport_gsc.c port = dev->hpa.start + PARPORT_GSC_OFFSET; start 300 drivers/parport/parport_mfc3.c unsigned long piabase = z->resource.start+PIABASE; start 579 drivers/parport/parport_pc.c unsigned long start = (unsigned long) buf; start 585 drivers/parport/parport_pc.c if ((start ^ end) & ~0xffffUL) start 586 drivers/parport/parport_pc.c maxlen = 0x10000 - (start & 0xffff); start 143 drivers/pci/bus.c if (r.start < region->start) start 144 drivers/pci/bus.c r.start = region->start; start 148 drivers/pci/bus.c if (r.end < r.start) start 149 drivers/pci/bus.c res->end = res->start - 1; start 195 drivers/pci/bus.c if (avail.start) start 196 drivers/pci/bus.c min_used = avail.start; start 270 drivers/pci/bus.c resource_size_t start, end; start 278 drivers/pci/bus.c start = max(r->start, res->start); start 281 drivers/pci/bus.c if (start > end) start 284 drivers/pci/bus.c if (res->start == start && res->end == end) start 287 drivers/pci/bus.c res->start = start; start 443 drivers/pci/controller/dwc/pci-dra7xx.c ep->phys_base = res->start; start 722 drivers/pci/controller/dwc/pci-dra7xx.c base = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 1112 drivers/pci/controller/dwc/pci-imx6.c if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) start 180 drivers/pci/controller/dwc/pci-keystone.c msi_target = ks_pcie->app.start + MSI_IRQ; start 403 drivers/pci/controller/dwc/pci-keystone.c u64 start = pp->mem->start; start 420 drivers/pci/controller/dwc/pci-keystone.c for (i = 0; i < num_viewport && (start < end); i++) { start 422 drivers/pci/controller/dwc/pci-keystone.c lower_32_bits(start) | OB_ENABLEN); start 424 drivers/pci/controller/dwc/pci-keystone.c upper_32_bits(start)); start 425 drivers/pci/controller/dwc/pci-keystone.c start += OB_WIN_SIZE * SZ_1M; start 490 drivers/pci/controller/dwc/pci-keystone.c dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); start 1010 drivers/pci/controller/dwc/pci-keystone.c ep->phys_base = res->start; start 105 drivers/pci/controller/dwc/pci-layerscape-ep.c ep->phys_base = res->start; start 169 drivers/pci/controller/dwc/pci-meson.c return devm_ioremap(dev, res->start, resource_size(res)); start 29 drivers/pci/controller/dwc/pcie-al.c if (bus->number == cfg->busr.start) { start 318 drivers/pci/controller/dwc/pcie-al.c target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask; start 323 drivers/pci/controller/dwc/pcie-al.c secondary_bus = pp->busn->start + 1; start 473 drivers/pci/controller/dwc/pcie-artpec6.c ep->phys_base = res->start; start 347 drivers/pci/controller/dwc/pcie-designware-ep.c .start = dw_pcie_ep_start, start 336 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_base = cfg_res->start; start 337 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_base = cfg_res->start + pp->cfg0_size; start 369 drivers/pci/controller/dwc/pcie-designware-host.c pp->io_bus_addr = pp->io->start - win->offset; start 376 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem_bus_addr = pp->mem->start - win->offset; start 382 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_base = 
pp->cfg->start; start 383 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_base = pp->cfg->start + pp->cfg0_size; start 393 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg->start, start 401 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem_base = pp->mem->start; start 484 drivers/pci/controller/dwc/pcie-designware-host.c pp->root_bus_nr = pp->busn->start; start 165 drivers/pci/controller/dwc/pcie-designware-plat.c ep->phys_base = res->start; start 56 drivers/pci/controller/dwc/pcie-designware.c static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start, start 66 drivers/pci/controller/dwc/pcie-designware.c if (start) start 67 drivers/pci/controller/dwc/pcie-designware.c pos = start; start 78 drivers/pci/controller/dwc/pcie-designware.c if (PCI_EXT_CAP_ID(header) == cap && pos != start) start 32 drivers/pci/controller/dwc/pcie-hisi.c if (bus->number == cfg->busr.start) { start 50 drivers/pci/controller/dwc/pcie-hisi.c if (bus->number == cfg->busr.start) { start 68 drivers/pci/controller/dwc/pcie-hisi.c if (bus->number == cfg->busr.start) start 99 drivers/pci/controller/dwc/pcie-hisi.c reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); start 357 drivers/pci/controller/dwc/pcie-hisi.c reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); start 1179 drivers/pci/controller/dwc/pcie-tegra194.c appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, start 1203 drivers/pci/controller/dwc/pcie-tegra194.c pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, start 404 drivers/pci/controller/pci-aardvark.c u32 start, isr; start 406 drivers/pci/controller/pci-aardvark.c start = advk_readl(pcie, PIO_START); start 408 drivers/pci/controller/pci-aardvark.c if (!start && isr) start 979 drivers/pci/controller/pci-aardvark.c pcie->root_bus_nr = res->start; start 498 drivers/pci/controller/pci-ftpci100.c if (!faraday_res_to_memcfg(io->start - win->offset, start 84 drivers/pci/controller/pci-host-common.c bridge->busnr = cfg->busr.start; start 37 drivers/pci/controller/pci-host-generic.c if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0) start 1673 drivers/pci/controller/pci-hyperv.c low_base = hbus->low_mmio_res->start; start 1679 drivers/pci/controller/pci-hyperv.c high_base = hbus->high_mmio_res->start; start 2464 drivers/pci/controller/pci-hyperv.c vmbus_free_mmio(hbus->low_mmio_res->start, start 2470 drivers/pci/controller/pci-hyperv.c vmbus_free_mmio(hbus->high_mmio_res->start, start 2549 drivers/pci/controller/pci-hyperv.c vmbus_free_mmio(hbus->low_mmio_res->start, start 2593 drivers/pci/controller/pci-hyperv.c vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); start 2625 drivers/pci/controller/pci-hyperv.c d0_entry->mmio_base = hbus->mem_config->start; start 2941 drivers/pci/controller/pci-hyperv.c hbus->cfg_addr = ioremap(hbus->mem_config->start, start 390 drivers/pci/controller/pci-mvebu.c desired.base = port->pcie->io.start + desired.remap; start 679 drivers/pci/controller/pci-mvebu.c resource_size_t start, start 684 drivers/pci/controller/pci-mvebu.c return start; start 698 drivers/pci/controller/pci-mvebu.c return round_up(start, max_t(resource_size_t, SZ_64K, start 701 drivers/pci/controller/pci-mvebu.c return round_up(start, max_t(resource_size_t, SZ_1M, start 704 drivers/pci/controller/pci-mvebu.c return start; start 991 drivers/pci/controller/pci-mvebu.c pcie->realio.start = PCIBIOS_MIN_IO; start 1027 drivers/pci/controller/pci-mvebu.c pci_ioremap_io(i, pcie->io.start + 
i); start 252 drivers/pci/controller/pci-rcar-gen2.c val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM; start 262 drivers/pci/controller/pci-rcar-gen2.c val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; start 350 drivers/pci/controller/pci-rcar-gen2.c if (!mem_res || !mem_res->start) start 353 drivers/pci/controller/pci-rcar-gen2.c if (mem_res->start & 0xFFFF) start 387 drivers/pci/controller/pci-rcar-gen2.c priv->busnr = busnr.start; start 388 drivers/pci/controller/pci-rcar-gen2.c if (busnr.end != busnr.start) start 774 drivers/pci/controller/pci-tegra.c devm_release_mem_region(dev, port->regs.start, start 818 drivers/pci/controller/pci-tegra.c pci_remap_iospace(&pcie->pio, pcie->io.start); start 916 drivers/pci/controller/pci-tegra.c afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); start 922 drivers/pci/controller/pci-tegra.c axi_address = pcie->io.start; start 928 drivers/pci/controller/pci-tegra.c fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1; start 930 drivers/pci/controller/pci-tegra.c axi_address = pcie->prefetch.start; start 936 drivers/pci/controller/pci-tegra.c fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1; start 938 drivers/pci/controller/pci-tegra.c axi_address = pcie->mem.start; start 1542 drivers/pci/controller/pci-tegra.c pcie->cs.end = pcie->cs.start + SZ_4K - 1; start 2180 drivers/pci/controller/pci-tegra.c pcie->offset.io = res.start - range.pci_addr; start 2193 drivers/pci/controller/pci-tegra.c pcie->io.start = range.cpu_addr; start 2208 drivers/pci/controller/pci-tegra.c pcie->offset.mem = res.start - range.pci_addr; start 2225 drivers/pci/controller/pci-tegra.c pcie->busn.start = 0; start 2707 drivers/pci/controller/pci-tegra.c .start = tegra_pcie_ports_seq_start, start 2810 drivers/pci/controller/pci-tegra.c host->busnr = pcie->busn.start; start 119 drivers/pci/controller/pci-thunder-ecam.c node_bits = (cfg->res.start >> 32) & (1 << 12); start 133 drivers/pci/controller/pci-thunder-pem.c if (bus->number < cfg->busr.start || start 141 drivers/pci/controller/pci-thunder-pem.c if (bus->number == cfg->busr.start) start 282 drivers/pci/controller/pci-thunder-pem.c if (bus->number < cfg->busr.start || start 289 drivers/pci/controller/pci-thunder-pem.c if (bus->number == cfg->busr.start) start 306 drivers/pci/controller/pci-thunder-pem.c pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000); start 316 drivers/pci/controller/pci-thunder-pem.c bar4_start = res_pem->start + 0xf00000; start 336 drivers/pci/controller/pci-thunder-pem.c resource_size_t start = r->start, end = r->end; start 344 drivers/pci/controller/pci-thunder-pem.c res = request_mem_region(start, end - start + 1, regionid); start 365 drivers/pci/controller/pci-thunder-pem.c res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | start 394 drivers/pci/controller/pci-thunder-pem.c res_pem->end = res_pem->start + SZ_64K - 1; start 396 drivers/pci/controller/pci-thunder-pem.c res_pem->end = res_pem->start + SZ_16M - 1; start 537 drivers/pci/controller/pci-v3-semi.c v3->io_bus_addr = io->start - win->offset; start 558 drivers/pci/controller/pci-v3-semi.c v3->pre_mem = mem->start; start 559 drivers/pci/controller/pci-v3-semi.c v3->pre_bus_addr = mem->start - win->offset; start 567 drivers/pci/controller/pci-v3-semi.c (mem->start != v3->non_pre_mem + SZ_256M)) { start 583 drivers/pci/controller/pci-v3-semi.c v3->non_pre_mem = mem->start; start 584 drivers/pci/controller/pci-v3-semi.c v3->non_pre_bus_addr = mem->start - win->offset; start 604 
drivers/pci/controller/pci-v3-semi.c host->busnr = win->res->start; start 781 drivers/pci/controller/pci-v3-semi.c if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16)) start 791 drivers/pci/controller/pci-v3-semi.c v3->config_mem = regs->start; start 95 drivers/pci/controller/pci-versatile.c writel(res->start >> 28, PCI_IMAP(mem)); start 462 drivers/pci/controller/pci-xgene-msi.c xgene_msi->msi_addr = res->start; start 362 drivers/pci/controller/pci-xgene.c port->cfg_addr = res->start; start 425 drivers/pci/controller/pci-xgene.c res->start - window->offset); start 433 drivers/pci/controller/pci-xgene.c res->start, start 434 drivers/pci/controller/pci-xgene.c res->start - start 438 drivers/pci/controller/pci-xgene.c res->start, start 439 drivers/pci/controller/pci-xgene.c res->start - start 246 drivers/pci/controller/pcie-altera-msi.c msi->vector_phy = res->start; start 423 drivers/pci/controller/pcie-cadence-ep.c .start = cdns_pcie_ep_start, start 505 drivers/pci/controller/pcie-cadence-ep.c ret = pci_epc_mem_init(epc, pcie->mem_res->start, start 50 drivers/pci/controller/pcie-cadence-host.c if (busn == rc->bus_range->start) { start 80 drivers/pci/controller/pcie-cadence-host.c if (busn == rc->bus_range->start + 1) start 156 drivers/pci/controller/pcie-cadence-host.c desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); start 160 drivers/pci/controller/pcie-cadence-host.c cpu_addr = cfg_res->start - mem_res->start; start 219 drivers/pci/controller/pcie-cadence-host.c rc->pcie.bus = bus_range->start; start 76 drivers/pci/controller/pcie-cadence.c cpu_addr -= pcie->mem_res->start; start 103 drivers/pci/controller/pcie-cadence.c cpu_addr -= pcie->mem_res->start; start 59 drivers/pci/controller/pcie-iproc-bcma.c pcie->mem.start = bdev->addr_s[0]; start 66 drivers/pci/controller/pcie-iproc-platform.c pcie->base = devm_pci_remap_cfgspace(dev, reg.start, start 72 drivers/pci/controller/pcie-iproc-platform.c pcie->base_addr = reg.start; start 1035 drivers/pci/controller/pcie-iproc.c ret = iproc_pcie_setup_ob(pcie, res->start, start 1036 drivers/pci/controller/pcie-iproc.c res->start - window->offset, start 1198 drivers/pci/controller/pcie-iproc.c if (tmp->res->start < range->cpu_addr) start 1202 drivers/pci/controller/pcie-iproc.c res->start = range->cpu_addr; start 1203 drivers/pci/controller/pcie-iproc.c res->end = res->start + range->size - 1; start 1209 drivers/pci/controller/pcie-iproc.c entry->offset = res->start - range->cpu_addr; start 1272 drivers/pci/controller/pcie-iproc.c *msi_addr = res.start + GITS_TRANSLATER; start 721 drivers/pci/controller/pcie-mediatek.c val = lower_32_bits(mem->start) | start 725 drivers/pci/controller/pcie-mediatek.c val = upper_32_bits(mem->start); start 1055 drivers/pci/controller/pcie-mediatek.c pcie->busnr = win->res->start; start 444 drivers/pci/controller/pcie-mobiveil.c pcie->pcie_reg_base = res->start; start 632 drivers/pci/controller/pcie-mobiveil.c program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0, start 649 drivers/pci/controller/pcie-mobiveil.c win->res->start, start 650 drivers/pci/controller/pcie-mobiveil.c win->res->start - win->offset, start 356 drivers/pci/controller/pcie-rcar.c res_start = pci_pio_to_address(res->start); start 358 drivers/pci/controller/pcie-rcar.c res_start = res->start; start 391 drivers/pci/controller/pcie-rcar.c pci->root_bus_nr = res->start; start 82 drivers/pci/controller/pcie-rockchip-ep.c cpu_addr -= rockchip->mem_res->start; start 526 drivers/pci/controller/pcie-rockchip-ep.c .start = 
rockchip_pcie_ep_start, start 617 drivers/pci/controller/pcie-rockchip-ep.c err = pci_epc_mem_init(epc, rockchip->mem_res->start, start 1014 drivers/pci/controller/pcie-rockchip-host.c rockchip->io_bus_addr = io->start - win->offset; start 1027 drivers/pci/controller/pcie-rockchip-host.c rockchip->mem_bus_addr = mem->start - win->offset; start 1030 drivers/pci/controller/pcie-rockchip-host.c rockchip->root_bus_nr = win->res->start; start 270 drivers/pci/controller/pcie-tango.c pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL; start 784 drivers/pci/controller/pcie-xilinx-nwl.c pcie->phys_breg_base = res->start; start 790 drivers/pci/controller/pcie-xilinx-nwl.c pcie->phys_pcie_reg_base = res->start; start 796 drivers/pci/controller/pcie-xilinx-nwl.c pcie->phys_ecam_base = res->start; start 595 drivers/pci/controller/vmd.c offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - start 597 drivers/pci/controller/vmd.c offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - start 620 drivers/pci/controller/vmd.c .start = vmd->busn_start, start 649 drivers/pci/controller/vmd.c .start = res->start, start 662 drivers/pci/controller/vmd.c .start = res->start + membar2_offset, start 36 drivers/pci/ecam.c if (busr->start > busr->end) start 45 drivers/pci/ecam.c cfg->busr.start = busr->start; start 52 drivers/pci/ecam.c cfg->busr.end = busr->start + bus_range - 1; start 58 drivers/pci/ecam.c cfg->res.start = cfgres->start; start 77 drivers/pci/ecam.c pci_remap_cfgspace(cfgres->start + i * bsz, start 83 drivers/pci/ecam.c cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz); start 136 drivers/pci/ecam.c if (busn < cfg->busr.start || busn > cfg->busr.end) start 139 drivers/pci/ecam.c busn -= cfg->busr.start; start 31 drivers/pci/endpoint/pci-ep-cfs.c bool start; start 49 drivers/pci/endpoint/pci-ep-cfs.c bool start; start 55 drivers/pci/endpoint/pci-ep-cfs.c ret = kstrtobool(page, &start); start 59 drivers/pci/endpoint/pci-ep-cfs.c if (!start) { start 70 drivers/pci/endpoint/pci-ep-cfs.c epc_group->start = start; start 78 drivers/pci/endpoint/pci-ep-cfs.c to_pci_epc_group(item)->start); start 81 drivers/pci/endpoint/pci-ep-cfs.c CONFIGFS_ATTR(pci_epc_, start); start 133 drivers/pci/endpoint/pci-ep-cfs.c WARN_ON_ONCE(epc_group->start); start 172 drivers/pci/endpoint/pci-epc-core.c if (!epc->ops->start) start 176 drivers/pci/endpoint/pci-epc-core.c ret = epc->ops->start(epc); start 64 drivers/pci/host-bridge.c region->start = res->start - offset; start 72 drivers/pci/host-bridge.c return region1->start <= region2->start && region1->end >= region2->end; start 88 drivers/pci/host-bridge.c bus_region.start = window->res->start - window->offset; start 97 drivers/pci/host-bridge.c res->start = region->start + offset; start 199 drivers/pci/hotplug/acpiphp_glue.c if (((buses >> 8) & 0xff) != bus->busn_res.start) { start 202 drivers/pci/hotplug/acpiphp_glue.c | ((unsigned int)(bus->busn_res.start) << 8) start 375 drivers/pci/hotplug/acpiphp_glue.c max = bus->busn_res.start; start 443 drivers/pci/hotplug/acpiphp_glue.c max = bus->busn_res.start; start 734 drivers/pci/hotplug/acpiphp_glue.c if ((res->flags & type_mask) && !res->start && start 323 drivers/pci/hotplug/ibmphp.h u32 start; start 350 drivers/pci/hotplug/ibmphp.h u32 start; start 406 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->io[count]->start); start 409 drivers/pci/hotplug/ibmphp_pci.c debug("b4 writing, the IO address is %x\n", func->io[count]->start); start 453 
drivers/pci/hotplug/ibmphp_pci.c pfmem[count]->start = mem_tmp->start; start 466 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); start 469 drivers/pci/hotplug/ibmphp_pci.c debug("b4 writing, start address is %x\n", func->pfmem[count]->start); start 507 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->mem[count]->start); start 509 drivers/pci/hotplug/ibmphp_pci.c debug("b4 writing, start address is %x\n", func->mem[count]->start); start 673 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->io[count]->start); start 723 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); start 759 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->mem[count]->start); start 932 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8); start 952 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16); start 967 drivers/pci/hotplug/ibmphp_pci.c pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, 0x0000 | bus->rangePFMem->start >> 16); start 1242 drivers/pci/hotplug/ibmphp_pci.c debug("io->start = %x\n", io->start); start 1252 drivers/pci/hotplug/ibmphp_pci.c debug("io->start = %x\n", io->start); start 1271 drivers/pci/hotplug/ibmphp_pci.c debug("pfmem->start = %x\n", pfmem->start); start 1285 drivers/pci/hotplug/ibmphp_pci.c debug("mem->start = %x\n", mem->start); start 1374 drivers/pci/hotplug/ibmphp_pci.c debug("io->start = %x\n", io->start); start 1389 drivers/pci/hotplug/ibmphp_pci.c debug("pfmem->start = %x\n", pfmem->start); start 1401 drivers/pci/hotplug/ibmphp_pci.c debug("mem->start = %x\n", mem->start); start 1627 drivers/pci/hotplug/ibmphp_pci.c io_range->start = io->start; start 1638 drivers/pci/hotplug/ibmphp_pci.c mem_range->start = mem->start; start 1649 drivers/pci/hotplug/ibmphp_pci.c pfmem_range->start = pfmem->start; start 71 drivers/pci/hotplug/ibmphp_res.c rs->start = curr->start_addr; start 110 drivers/pci/hotplug/ibmphp_res.c newrange->start = curr->start_addr; start 118 drivers/pci/hotplug/ibmphp_res.c debug("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); start 127 drivers/pci/hotplug/ibmphp_res.c debug("First Memory Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 137 drivers/pci/hotplug/ibmphp_res.c debug("First IO Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 147 drivers/pci/hotplug/ibmphp_res.c debug("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 210 drivers/pci/hotplug/ibmphp_res.c debug("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 225 drivers/pci/hotplug/ibmphp_res.c debug("New Bus, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 236 drivers/pci/hotplug/ibmphp_res.c debug("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 250 drivers/pci/hotplug/ibmphp_res.c debug("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, 
newrange->end); start 261 drivers/pci/hotplug/ibmphp_res.c debug("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 274 drivers/pci/hotplug/ibmphp_res.c debug("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); start 305 drivers/pci/hotplug/ibmphp_res.c debug("Memory resource for device %x, bus %x, [%x - %x]\n", new_mem->devfunc, new_mem->busno, new_mem->start, new_mem->end); start 323 drivers/pci/hotplug/ibmphp_res.c debug("PFMemory resource for device %x, bus %x, [%x - %x]\n", new_pfmem->devfunc, new_pfmem->busno, new_pfmem->start, new_pfmem->end); start 346 drivers/pci/hotplug/ibmphp_res.c debug("IO resource for device %x, bus %x, [%x - %x]\n", new_io->devfunc, new_io->busno, new_io->start, new_io->end); start 392 drivers/pci/hotplug/ibmphp_res.c if (range->start < range_cur->start) start 505 drivers/pci/hotplug/ibmphp_res.c if ((res->start >= range->start) && (res->end <= range->end)) { start 617 drivers/pci/hotplug/ibmphp_res.c if ((res->start >= range_cur->start) && (res->end <= range_cur->end)) { start 678 drivers/pci/hotplug/ibmphp_res.c debug("i should be here, [%x - %x]\n", res->start, res->end); start 685 drivers/pci/hotplug/ibmphp_res.c if (res->start < res_cur->start) start 696 drivers/pci/hotplug/ibmphp_res.c } else if (res->start < res_cur->start) { start 797 drivers/pci/hotplug/ibmphp_res.c if ((res_cur->start == res->start) && (res_cur->end == res->end)) start 817 drivers/pci/hotplug/ibmphp_res.c if ((res_cur->start == res->start) && (res_cur->end == res->end)) { start 820 drivers/pci/hotplug/ibmphp_res.c if ((mem_cur->start == res_cur->start) start 1022 drivers/pci/hotplug/ibmphp_res.c len_tmp = res_cur->start - 1 - range->start; start 1024 drivers/pci/hotplug/ibmphp_res.c if ((res_cur->start != range->start) && (len_tmp >= res->len)) { start 1029 drivers/pci/hotplug/ibmphp_res.c if ((range->start % tmp_divide) == 0) { start 1033 drivers/pci/hotplug/ibmphp_res.c start_cur = range->start; start 1036 drivers/pci/hotplug/ibmphp_res.c tmp_start = range->start; start 1039 drivers/pci/hotplug/ibmphp_res.c while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { start 1047 drivers/pci/hotplug/ibmphp_res.c if (tmp_start >= res_cur->start - 1) start 1054 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1056 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1093 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1095 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1105 drivers/pci/hotplug/ibmphp_res.c len_tmp = res_cur->start - 1 - range->start; start 1107 drivers/pci/hotplug/ibmphp_res.c if ((res_cur->start != range->start) && (len_tmp >= res->len)) { start 1109 drivers/pci/hotplug/ibmphp_res.c if ((range->start % tmp_divide) == 0) { start 1113 drivers/pci/hotplug/ibmphp_res.c start_cur = range->start; start 1116 drivers/pci/hotplug/ibmphp_res.c tmp_start = range->start; start 1119 drivers/pci/hotplug/ibmphp_res.c while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { start 1127 drivers/pci/hotplug/ibmphp_res.c if (tmp_start >= res_cur->start - 1) start 1133 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1135 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1142 drivers/pci/hotplug/ibmphp_res.c len_tmp = res_cur->start - 1 - res_prev->end - 1; start 1156 drivers/pci/hotplug/ibmphp_res.c while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { 
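
The ibmphp_res.c and host-bridge.c entries above all do arithmetic on struct resource windows, which store an inclusive [start, end] pair. A minimal sketch of that convention, assuming only the fields declared in include/linux/ioport.h (the helper name fill_window is invented for illustration):

    #include <linux/ioport.h>

    /* Place a window of 'len' bytes at 'base'. The end is inclusive, so
     * resource_size(res) == res->end - res->start + 1 == len. */
    static void fill_window(struct resource *res, resource_size_t base,
                            resource_size_t len)
    {
            res->start = base;
            res->end = base + len - 1;
    }

This is the same identity the listed lines such as "res->end = res->start + res->len - 1" rely on.
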
start 1164 drivers/pci/hotplug/ibmphp_res.c if (tmp_start >= res_cur->start - 1) start 1170 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1172 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1203 drivers/pci/hotplug/ibmphp_res.c len_tmp = range->end - range->start; start 1207 drivers/pci/hotplug/ibmphp_res.c if ((range->start % tmp_divide) == 0) { start 1211 drivers/pci/hotplug/ibmphp_res.c start_cur = range->start; start 1214 drivers/pci/hotplug/ibmphp_res.c tmp_start = range->start; start 1231 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1233 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1246 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1248 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1269 drivers/pci/hotplug/ibmphp_res.c len_tmp = range->end - range->start; start 1273 drivers/pci/hotplug/ibmphp_res.c if ((range->start % tmp_divide) == 0) { start 1277 drivers/pci/hotplug/ibmphp_res.c start_cur = range->start; start 1280 drivers/pci/hotplug/ibmphp_res.c tmp_start = range->start; start 1297 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1299 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1312 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1314 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1320 drivers/pci/hotplug/ibmphp_res.c res->start = start_cur; start 1322 drivers/pci/hotplug/ibmphp_res.c res->end = res->start + res->len - 1; start 1433 drivers/pci/hotplug/ibmphp_res.c if (ibmphp_find_resource(bus_prev, range_cur->start, &res, IO) < 0) start 1447 drivers/pci/hotplug/ibmphp_res.c if (ibmphp_find_resource(bus_prev, range_cur->start, &res, MEM) < 0) start 1461 drivers/pci/hotplug/ibmphp_res.c if (ibmphp_find_resource(bus_prev, range_cur->start, &res, PFMEM) < 0) start 1508 drivers/pci/hotplug/ibmphp_res.c if (res_cur->start == start_address) { start 1522 drivers/pci/hotplug/ibmphp_res.c if (res_cur->start == start_address) { start 1539 drivers/pci/hotplug/ibmphp_res.c debug("*res->start = %x\n", (*res)->start); start 1697 drivers/pci/hotplug/ibmphp_res.c mem->start = pfmem_cur->start; start 1777 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x]\n", range->start, range->end); start 1787 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x]\n", range->start, range->end); start 1798 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x]\n", range->start, range->end); start 1811 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); start 1826 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); start 1841 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); start 1857 drivers/pci/hotplug/ibmphp_res.c debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); start 1884 drivers/pci/hotplug/ibmphp_res.c if ((range_cur->start == range->start) && (range_cur->end == range->end)) start 1976 drivers/pci/hotplug/ibmphp_res.c range->start = start_address; start 2004 drivers/pci/hotplug/ibmphp_res.c io->start = start_address; start 2006 drivers/pci/hotplug/ibmphp_res.c io->len = io->end - io->start + 1; start 2023 drivers/pci/hotplug/ibmphp_res.c range->start = start_address; start 2052 drivers/pci/hotplug/ibmphp_res.c mem->start = start_address; start 2054 drivers/pci/hotplug/ibmphp_res.c mem->len = mem->end - mem->start + 
1; start 2075 drivers/pci/hotplug/ibmphp_res.c range->start = start_address; start 2103 drivers/pci/hotplug/ibmphp_res.c pfmem->start = start_address; start 2105 drivers/pci/hotplug/ibmphp_res.c pfmem->len = pfmem->end - pfmem->start + 1; start 40 drivers/pci/hotplug/shpchp_sysfs.c (unsigned long long)res->start, start 49 drivers/pci/hotplug/shpchp_sysfs.c (unsigned long long)res->start, start 57 drivers/pci/hotplug/shpchp_sysfs.c (unsigned long long)res->start, start 62 drivers/pci/hotplug/shpchp_sysfs.c for (busnr = bus->busn_res.start; busnr <= bus->busn_res.end; busnr++) { start 179 drivers/pci/iov.c virtfn->resource[i].start = res->start + size * id; start 180 drivers/pci/iov.c virtfn->resource[i].end = virtfn->resource[i].start + size - 1; start 650 drivers/pci/iov.c res->end = res->start + resource_size(res) * total - 1; start 829 drivers/pci/iov.c new = region.start; start 835 drivers/pci/iov.c new = region.start >> 16 >> 16; start 26 drivers/pci/mmap.c resource_size_t start, end; start 28 drivers/pci/mmap.c pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); start 31 drivers/pci/mmap.c vma->vm_pgoff -= start >> PAGE_SHIFT; start 85 drivers/pci/mmap.c resource_size_t start, end; start 92 drivers/pci/mmap.c pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); start 93 drivers/pci/mmap.c vma->vm_pgoff += start >> PAGE_SHIFT; start 184 drivers/pci/of.c res->start = bus_range[0]; start 281 drivers/pci/of.c bus_range->start = busno; start 287 drivers/pci/of.c if (bus_range->end > bus_range->start + bus_max) start 288 drivers/pci/of.c bus_range->end = bus_range->start + bus_max; start 340 drivers/pci/of.c pci_add_resource_offset(resources, res, res->start - range.pci_addr); start 188 drivers/pci/p2pdma.c pgmap->res.start = pci_resource_start(pdev, bar) + offset; start 189 drivers/pci/p2pdma.c pgmap->res.end = pgmap->res.start + size - 1; start 135 drivers/pci/pci-sysfs.c resource_size_t start, end; start 144 drivers/pci/pci-sysfs.c pci_resource_to_user(pci_dev, i, res, &start, &end); start 146 drivers/pci/pci-sysfs.c (unsigned long long)start, start 991 drivers/pci/pci-sysfs.c unsigned long nr, start, size; start 997 drivers/pci/pci-sysfs.c start = vma->vm_pgoff; start 1004 drivers/pci/pci-sysfs.c if (start >= pci_start && start < pci_start + size && start 1005 drivers/pci/pci-sysfs.c start + nr <= pci_start + size) start 1032 drivers/pci/pci-sysfs.c if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) start 179 drivers/pci/pci.c return ioremap_nocache(res->start, resource_size(res)); start 490 drivers/pci/pci.c int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap) start 502 drivers/pci/pci.c if (start) start 503 drivers/pci/pci.c pos = start; start 516 drivers/pci/pci.c if (PCI_EXT_CAP_ID(header) == cap && pos != start) start 680 drivers/pci/pci.c if (r->start && resource_contains(r, res)) start 1389 drivers/pci/pci.c int start, int end, int retry, start 1394 drivers/pci/pci.c for (index = end; index >= start; index--) start 2899 drivers/pci/pci.c resource_size_t start, end; start 2940 drivers/pci/pci.c start = (base & PCI_EA_FIELD_MASK); start 2961 drivers/pci/pci.c start |= ((u64)base_upper << 32); start 2964 drivers/pci/pci.c end = start + (max_offset | 0x03); start 2983 drivers/pci/pci.c if (end < start) { start 2995 drivers/pci/pci.c res->start = start; start 3400 drivers/pci/pci.c bool pci_acs_path_enabled(struct pci_dev *start, start 3403 drivers/pci/pci.c struct pci_dev *pdev, *parent = start; start 3941 
drivers/pci/pci.c unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; start 3973 drivers/pci/pci.c unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; start 4084 drivers/pci/pci.c if (!devm_request_mem_region(dev, res->start, size, name)) { start 4089 drivers/pci/pci.c dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); start 4092 drivers/pci/pci.c devm_release_mem_region(dev, res->start, size); start 6092 drivers/pci/pci.c resource_size_t *start, resource_size_t *end) start 6094 drivers/pci/pci.c *start = rsrc->start; start 6212 drivers/pci/pci.c r->start = 0; start 6217 drivers/pci/pci.c r->start = align; start 6218 drivers/pci/pci.c r->end = r->start + size - 1; start 6277 drivers/pci/pci.c r->start = 0; start 28 drivers/pci/probe.c .start = 0, start 58 drivers/pci/probe.c r->res.start = 0; start 265 drivers/pci/probe.c res->start = 0; start 275 drivers/pci/probe.c res->start = 0; start 283 drivers/pci/probe.c region.start = l64; start 300 drivers/pci/probe.c if (inverted_region.start != region.start) { start 302 drivers/pci/probe.c res->start = 0; start 303 drivers/pci/probe.c res->end = region.end - region.start; start 305 drivers/pci/probe.c pos, (unsigned long long)region.start); start 430 drivers/pci/probe.c region.start = base; start 452 drivers/pci/probe.c region.start = base; start 505 drivers/pci/probe.c region.start = base; start 849 drivers/pci/probe.c bus->number = bus->busn_res.start = bridge->busnr; start 919 drivers/pci/probe.c (unsigned long long)(res->start - offset), start 1007 drivers/pci/probe.c child->number = child->busn_res.start = busnr; start 1008 drivers/pci/probe.c child->primary = parent->busn_res.start; start 1284 drivers/pci/probe.c | ((unsigned int)(child->busn_res.start) << 8) start 1811 drivers/pci/probe.c region.start = 0x1F0; start 1818 drivers/pci/probe.c region.start = 0x3F6; start 1827 drivers/pci/probe.c region.start = 0x170; start 1834 drivers/pci/probe.c region.start = 0x376; start 2739 drivers/pci/probe.c unsigned int start = bus->busn_res.start; start 2740 drivers/pci/probe.c unsigned int devfn, fn, cmax, max = start; start 2846 drivers/pci/probe.c if (max - start < used_buses) { start 2847 drivers/pci/probe.c max = start + used_buses; start 2854 drivers/pci/probe.c &bus->busn_res, max - start); start 2969 drivers/pci/probe.c res->start = bus; start 2998 drivers/pci/probe.c if (res->start > bus_max) start 3001 drivers/pci/probe.c size = bus_max - res->start + 1; start 3002 drivers/pci/probe.c ret = adjust_resource(res, res->start, size); start 3007 drivers/pci/probe.c pci_bus_insert_busn_res(b, res->start, res->end); start 3207 drivers/pci/probe.c int busnr, start = parent->busn_res.start; start 3211 drivers/pci/probe.c for (busnr = start; busnr <= end; busnr++) { start 375 drivers/pci/proc.c resource_size_t start, end; start 376 drivers/pci/proc.c pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); start 378 drivers/pci/proc.c (unsigned long long)(start | start 382 drivers/pci/proc.c resource_size_t start, end; start 383 drivers/pci/proc.c pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); start 385 drivers/pci/proc.c dev->resource[i].start < dev->resource[i].end ? 
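
The proc.c and pci-sysfs.c entries in this stretch pass each BAR through pci_resource_to_user() (whose trivial body appears in the pci.c entries above) before exposing the range to user space. A hedged sketch of that calling pattern; show_bar is a hypothetical helper, the API itself comes from include/linux/pci.h:

    #include <linux/pci.h>

    /* Convert a BAR's kernel resource into the user-visible range
     * before reporting it, as the sysfs/procfs code above does. */
    static void show_bar(struct pci_dev *pdev, int bar)
    {
            struct resource *res = &pdev->resource[bar];
            resource_size_t start, end;

            if (!res->flags)
                    return;
            pci_resource_to_user(pdev, bar, res, &start, &end);
            dev_info(&pdev->dev, "BAR %d: %pa-%pa\n", bar, &start, &end);
    }
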
start 386 drivers/pci/proc.c (unsigned long long)(end - start) + 1 : 0); start 396 drivers/pci/proc.c .start = pci_seq_start, start 101 drivers/pci/quirks.c struct pci_fixup *start, *end; start 105 drivers/pci/quirks.c start = __start_pci_fixups_early; start 110 drivers/pci/quirks.c start = __start_pci_fixups_header; start 117 drivers/pci/quirks.c start = __start_pci_fixups_final; start 122 drivers/pci/quirks.c start = __start_pci_fixups_enable; start 127 drivers/pci/quirks.c start = __start_pci_fixups_resume; start 132 drivers/pci/quirks.c start = __start_pci_fixups_resume_early; start 137 drivers/pci/quirks.c start = __start_pci_fixups_suspend; start 142 drivers/pci/quirks.c start = __start_pci_fixups_suspend_late; start 150 drivers/pci/quirks.c pci_do_fixups(dev, start, end); start 482 drivers/pci/quirks.c r->start = 0; start 499 drivers/pci/quirks.c if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { start 501 drivers/pci/quirks.c r->start = 0; start 527 drivers/pci/quirks.c bus_region.start = region; start 575 drivers/pci/quirks.c bus_region.start = region; start 1226 drivers/pci/quirks.c r->start = 0; start 2157 drivers/pci/quirks.c if (r->start & 0x8) { start 2159 drivers/pci/quirks.c r->start = 0; start 2188 drivers/pci/quirks.c r->start = 0; start 3324 drivers/pci/quirks.c dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1; start 3330 drivers/pci/quirks.c dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1; start 45 drivers/pci/rom.c rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; start 139 drivers/pci/rom.c loff_t start; start 146 drivers/pci/rom.c start = pci_resource_start(pdev, PCI_ROM_RESOURCE); start 155 drivers/pci/rom.c rom = ioremap(start, *size); start 34 drivers/pci/setup-bus.c resource_size_t start; start 70 drivers/pci/setup-bus.c tmp->start = res->start; start 197 drivers/pci/setup-bus.c res->start = 0; start 244 drivers/pci/setup-bus.c res->start = align; start 245 drivers/pci/setup-bus.c res->end = res->start + add_size - 1; start 410 drivers/pci/setup-bus.c if (add_align > dev_res->res->start) { start 413 drivers/pci/setup-bus.c dev_res->res->start = add_align; start 464 drivers/pci/setup-bus.c res->start = save_res->start; start 522 drivers/pci/setup-bus.c region.start); start 532 drivers/pci/setup-bus.c region.start); start 542 drivers/pci/setup-bus.c region.start); start 552 drivers/pci/setup-bus.c region.start); start 590 drivers/pci/setup-bus.c io_base_lo = (region.start >> 8) & io_mask; start 594 drivers/pci/setup-bus.c io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); start 619 drivers/pci/setup-bus.c l = (region.start >> 16) & 0xfff0; start 646 drivers/pci/setup-bus.c l = (region.start >> 16) & 0xfff0; start 649 drivers/pci/setup-bus.c bu = upper_32_bits(region.start); start 911 drivers/pci/setup-bus.c if (b_res->start || b_res->end) start 918 drivers/pci/setup-bus.c b_res->start = min_align; start 919 drivers/pci/setup-bus.c b_res->end = b_res->start + size0 - 1; start 1012 drivers/pci/setup-bus.c r->end = r->start - 1; start 1060 drivers/pci/setup-bus.c if (b_res->start || b_res->end) start 1066 drivers/pci/setup-bus.c b_res->start = min_align; start 1102 drivers/pci/setup-bus.c b_res[0].start = pci_cardbus_io_size; start 1103 drivers/pci/setup-bus.c b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; start 1114 drivers/pci/setup-bus.c b_res[1].start = pci_cardbus_io_size; start 1115 drivers/pci/setup-bus.c b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1; start 1147 
drivers/pci/setup-bus.c b_res[2].start = pci_cardbus_mem_size; start 1148 drivers/pci/setup-bus.c b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1; start 1164 drivers/pci/setup-bus.c b_res[3].start = pci_cardbus_mem_size; start 1165 drivers/pci/setup-bus.c b_res[3].end = b_res[3].start + b_res_3_size - 1; start 1532 drivers/pci/setup-bus.c r->start = 0; start 1674 drivers/pci/setup-bus.c if (!region.start) { start 1790 drivers/pci/setup-bus.c res->start = fail_res->start; start 2048 drivers/pci/setup-bus.c res->start = fail_res->start; start 2105 drivers/pci/setup-bus.c res->start = 0; start 2144 drivers/pci/setup-bus.c res->start = dev_res->start; start 2157 drivers/pci/setup-bus.c res->start = dev_res->start; start 57 drivers/pci/setup-res.c new = region.start; start 107 drivers/pci/setup-res.c new = region.start >> 16 >> 16; start 198 drivers/pci/setup-res.c resource_size_t fw_addr, start, end; start 204 drivers/pci/setup-res.c start = res->start; start 206 drivers/pci/setup-res.c res->start = fw_addr; start 207 drivers/pci/setup-res.c res->end = res->start + size - 1; start 224 drivers/pci/setup-res.c res->start = start; start 242 drivers/pci/setup-res.c return res->start; start 404 drivers/pci/setup-res.c res->start = 0; start 439 drivers/pci/setup-res.c res->end = res->start + pci_rebar_size_to_bytes(size) - 1; start 450 drivers/pci/setup-res.c res->end = res->start + pci_rebar_size_to_bytes(old) - 1; start 402 drivers/pci/xen-pcifront.c if (!r->parent && r->start && r->flags) { start 454 drivers/pci/xen-pcifront.c .start = 0, start 175 drivers/pcmcia/at91_cf.c io->start = cf->socket.io_offset; start 176 drivers/pcmcia/at91_cf.c io->stop = io->start + SZ_2K - 1; start 273 drivers/pcmcia/at91_cf.c cf->phys_baseaddr = io->start; start 327 drivers/pcmcia/at91_cf.c if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), "at91_cf")) { start 310 drivers/pcmcia/bcm63xx_pcmcia.c map->static_start = res->start + map->card_start; start 355 drivers/pcmcia/bcm63xx_pcmcia.c if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) { start 361 drivers/pcmcia/bcm63xx_pcmcia.c skt->base = ioremap(res->start, regmem_size); start 370 drivers/pcmcia/bcm63xx_pcmcia.c skt->io_base = ioremap(res->start, iomem_size); start 383 drivers/pcmcia/bcm63xx_pcmcia.c sock->pci_irq = irq_res->start; start 434 drivers/pcmcia/bcm63xx_pcmcia.c release_mem_region(skt->reg_res->start, regmem_size); start 449 drivers/pcmcia/bcm63xx_pcmcia.c release_mem_region(res->start, resource_size(res)); start 75 drivers/pcmcia/cardbus.c max = bus->busn_res.start; start 102 drivers/pcmcia/cistpl.c s->cis_virt = ioremap(mem->res->start, s->map_size); start 80 drivers/pcmcia/cs_internal.h extern struct resource *pcmcia_make_resource(resource_size_t start, start 395 drivers/pcmcia/db1xxx_ss.c map->start = (u32)sock->virt_io; start 396 drivers/pcmcia/db1xxx_ss.c map->stop = map->start + IO_MAP_SIZE; start 469 drivers/pcmcia/db1xxx_ss.c sock->card_irq = r ? r->start : 0; start 475 drivers/pcmcia/db1xxx_ss.c sock->insert_irq = r ? r->start : -1; start 477 drivers/pcmcia/db1xxx_ss.c sock->insert_gpio = r ? r->start : -1; start 478 drivers/pcmcia/db1xxx_ss.c sock->insert_irq = r ? gpio_to_irq(r->start) : -1; start 483 drivers/pcmcia/db1xxx_ss.c sock->stschg_irq = r ? r->start : -1; start 487 drivers/pcmcia/db1xxx_ss.c sock->eject_irq = r ? 
r->start : -1; start 498 drivers/pcmcia/db1xxx_ss.c sock->phys_attr = r->start; start 507 drivers/pcmcia/db1xxx_ss.c sock->phys_mem = r->start; start 516 drivers/pcmcia/db1xxx_ss.c sock->phys_io = r->start; start 202 drivers/pcmcia/electra_cf.c cf->mem_phys = mem.start; start 219 drivers/pcmcia/electra_cf.c (__ioremap_at(io.start, cf->io_virt, cf->io_size, start 229 drivers/pcmcia/electra_cf.c cf->iomem.start = (unsigned long)cf->mem_base; start 230 drivers/pcmcia/electra_cf.c cf->iomem.end = (unsigned long)cf->mem_base + (mem.end - mem.start); start 294 drivers/pcmcia/electra_cf.c cf->mem_phys, io.start, cf->irq); start 399 drivers/pcmcia/i82092.c struct resource res = { .start = 0, .end = 0x0fff }; start 572 drivers/pcmcia/i82092.c if ((io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)){ start 584 drivers/pcmcia/i82092.c indirect_write16(sock,I365_IO(map)+I365_W_START,io->start); start 621 drivers/pcmcia/i82092.c if ( (mem->card_start > 0x3ffffff) || (region.start > region.end) || start 627 drivers/pcmcia/i82092.c (unsigned long long)region.start, start 642 drivers/pcmcia/i82092.c i = (region.start >> 12) & 0x0fff; start 670 drivers/pcmcia/i82092.c i = ((mem->card_start - region.start) >> 12) & 0x3fff; start 643 drivers/pcmcia/i82365.c unsigned int start, stop; start 646 drivers/pcmcia/i82365.c start = i365_get_pair(sock, I365_IO(0)+I365_W_START); start 651 drivers/pcmcia/i82365.c ((start & 0xfeef) != 0x02e8)) { start 652 drivers/pcmcia/i82365.c if (!request_region(start, stop-start+1, "i82365")) start 654 drivers/pcmcia/i82365.c release_region(start, stop-start+1); start 1038 drivers/pcmcia/i82365.c (unsigned long long)io->start, (unsigned long long)io->stop); start 1040 drivers/pcmcia/i82365.c if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || start 1041 drivers/pcmcia/i82365.c (io->stop < io->start)) return -EINVAL; start 1045 drivers/pcmcia/i82365.c i365_set_pair(sock, I365_IO(map)+I365_W_START, io->start); start 1068 drivers/pcmcia/i82365.c (unsigned long long)mem->res->start, start 1073 drivers/pcmcia/i82365.c (mem->res->start > mem->res->end) || (mem->speed > 1000)) start 1075 drivers/pcmcia/i82365.c if ((mem->res->start > 0xffffff) || (mem->res->end > 0xffffff)) start 1083 drivers/pcmcia/i82365.c i = (mem->res->start >> 12) & 0x0fff; start 1097 drivers/pcmcia/i82365.c i = ((mem->card_start - mem->res->start) >> 12) & 0x3fff; start 1207 drivers/pcmcia/i82365.c struct resource res = { .start = 0, .end = 0x1000 }; start 166 drivers/pcmcia/omap_cf.c io->start = cf->phys_cf + SZ_4K; start 167 drivers/pcmcia/omap_cf.c io->stop = io->start + SZ_2K - 1; start 248 drivers/pcmcia/omap_cf.c cf->iomem.start = cf->phys_cf; start 200 drivers/pcmcia/pcmcia_cis.c p_dev->resource[0]->start = p_dev->resource[0]->end = 0; start 201 drivers/pcmcia/pcmcia_cis.c p_dev->resource[1]->start = p_dev->resource[1]->end = 0; start 214 drivers/pcmcia/pcmcia_cis.c p_dev->resource[1]->start = io->win[1-i].base; start 217 drivers/pcmcia/pcmcia_cis.c p_dev->resource[0]->start = io->win[i].base; start 227 drivers/pcmcia/pcmcia_cis.c p_dev->resource[2]->start = p_dev->resource[2]->end = 0; start 231 drivers/pcmcia/pcmcia_cis.c p_dev->resource[2]->start = mem->win[0].host_addr; start 70 drivers/pcmcia/pcmcia_resource.c if ((s->io[i].res->start <= res->start) && start 75 drivers/pcmcia/pcmcia_resource.c res->start = res->end = 0; start 100 drivers/pcmcia/pcmcia_resource.c unsigned int base = res->start; start 130 drivers/pcmcia/pcmcia_resource.c res->start = base; start 131 
drivers/pcmcia/pcmcia_resource.c res->end = res->start + num - 1; start 279 drivers/pcmcia/pcmcia_resource.c io_on.start = s->io[i].res->start; start 461 drivers/pcmcia/pcmcia_resource.c res->start = res->end = 0; start 576 drivers/pcmcia/pcmcia_resource.c u8 b = c->io[0].start & 0xff; start 578 drivers/pcmcia/pcmcia_resource.c b = (c->io[0].start >> 8) & 0xff; start 601 drivers/pcmcia/pcmcia_resource.c iomap.start = s->io[i].res->start; start 662 drivers/pcmcia/pcmcia_resource.c c->io[0].start = tmp.start; start 667 drivers/pcmcia/pcmcia_resource.c c->io[1].start = 0; start 858 drivers/pcmcia/pcmcia_resource.c if ((res->start && (s->features & SS_CAP_STATIC_MAP)) || start 859 drivers/pcmcia/pcmcia_resource.c (res->start & (align-1))) { start 863 drivers/pcmcia/pcmcia_resource.c if (res->start) start 880 drivers/pcmcia/pcmcia_resource.c win->res = pcmcia_find_mem_region(res->start, res->end, align, start 905 drivers/pcmcia/pcmcia_resource.c res->start = win->static_start; start 907 drivers/pcmcia/pcmcia_resource.c res->start = win->res->start; start 910 drivers/pcmcia/pcmcia_resource.c res->end += res->start - 1; start 445 drivers/pcmcia/pd6729.c indirect_write16(socket, I365_IO(map)+I365_W_START, io->start); start 480 drivers/pcmcia/pd6729.c if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) { start 491 drivers/pcmcia/pd6729.c i = (mem->res->start >> 12) & 0x0fff; start 519 drivers/pcmcia/pd6729.c indirect_write(socket, PD67_EXT_DATA, mem->res->start >> 24); start 523 drivers/pcmcia/pd6729.c i = ((mem->card_start - mem->res->start) >> 12) & 0x3fff; start 243 drivers/pcmcia/pxa2xx_base.c skt->res_skt.start = _PCMCIA(skt->nr); start 248 drivers/pcmcia/pxa2xx_base.c skt->res_io.start = _PCMCIAIO(skt->nr); start 253 drivers/pcmcia/pxa2xx_base.c skt->res_mem.start = _PCMCIAMem(skt->nr); start 258 drivers/pcmcia/pxa2xx_base.c skt->res_attr.start = _PCMCIAAttr(skt->nr); start 31 drivers/pcmcia/rsrc_iodyn.c resource_size_t start; start 33 drivers/pcmcia/rsrc_iodyn.c start = (res->start & ~data->mask) + data->offset; start 34 drivers/pcmcia/rsrc_iodyn.c if (start < res->start) start 35 drivers/pcmcia/rsrc_iodyn.c start += data->mask + 1; start 39 drivers/pcmcia/rsrc_iodyn.c if (start & 0x300) start 40 drivers/pcmcia/rsrc_iodyn.c start = (start + 0x3ff) & ~0x3ff; start 46 drivers/pcmcia/rsrc_iodyn.c if ((res->start + size - 1) >= 1024) start 47 drivers/pcmcia/rsrc_iodyn.c start = res->end; start 51 drivers/pcmcia/rsrc_iodyn.c return start; start 101 drivers/pcmcia/rsrc_iodyn.c if ((s->io[i].res->start & (align-1)) == *base) start 122 drivers/pcmcia/rsrc_iodyn.c *base = res->start; start 134 drivers/pcmcia/rsrc_iodyn.c if (adjust_resource(s->io[i].res, res->start, start 144 drivers/pcmcia/rsrc_iodyn.c try = res->start - num; start 147 drivers/pcmcia/rsrc_iodyn.c res->start - num, start 30 drivers/pcmcia/rsrc_mgr.c struct resource *pcmcia_make_resource(resource_size_t start, start 38 drivers/pcmcia/rsrc_mgr.c res->start = start; start 39 drivers/pcmcia/rsrc_mgr.c res->end = start + end - 1; start 274 drivers/pcmcia/rsrc_nonstatic.c s->cis_virt = ioremap(res->start, s->map_size); start 301 drivers/pcmcia/rsrc_nonstatic.c virt = ioremap(res->start, s->map_size); start 590 drivers/pcmcia/rsrc_nonstatic.c resource_size_t start) start 596 drivers/pcmcia/rsrc_nonstatic.c ret = (start & ~align_data->mask) + align_data->offset; start 597 drivers/pcmcia/rsrc_nonstatic.c if (ret < start) start 608 drivers/pcmcia/rsrc_nonstatic.c resource_size_t start; start 610 drivers/pcmcia/rsrc_nonstatic.c start 
= pcmcia_common_align(data, res->start); start 621 drivers/pcmcia/rsrc_nonstatic.c if (start < map_start) start 622 drivers/pcmcia/rsrc_nonstatic.c start = pcmcia_common_align(data, map_start); start 628 drivers/pcmcia/rsrc_nonstatic.c if (start >= res->end) start 631 drivers/pcmcia/rsrc_nonstatic.c if ((start + size - 1) <= map_end) start 639 drivers/pcmcia/rsrc_nonstatic.c start = res->end; start 641 drivers/pcmcia/rsrc_nonstatic.c return start; start 657 drivers/pcmcia/rsrc_nonstatic.c unsigned long start = m->base; start 660 drivers/pcmcia/rsrc_nonstatic.c if (start > r_start || r_end > end) start 730 drivers/pcmcia/rsrc_nonstatic.c if ((s->io[i].res->start & (align-1)) == *base) start 752 drivers/pcmcia/rsrc_nonstatic.c *base = res->start; start 764 drivers/pcmcia/rsrc_nonstatic.c ret = __nonstatic_adjust_io_region(s, res->start, start 767 drivers/pcmcia/rsrc_nonstatic.c ret = adjust_resource(s->io[i].res, res->start, start 779 drivers/pcmcia/rsrc_nonstatic.c try = res->start - num; start 782 drivers/pcmcia/rsrc_nonstatic.c res->start - num, start 786 drivers/pcmcia/rsrc_nonstatic.c res->start - num, start 857 drivers/pcmcia/rsrc_nonstatic.c static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) start 860 drivers/pcmcia/rsrc_nonstatic.c unsigned long size = end - start + 1; start 863 drivers/pcmcia/rsrc_nonstatic.c if (end < start) start 868 drivers/pcmcia/rsrc_nonstatic.c ret = add_interval(&data->mem_db, start, size); start 870 drivers/pcmcia/rsrc_nonstatic.c do_mem_probe(s, start, size, NULL, NULL); start 873 drivers/pcmcia/rsrc_nonstatic.c ret = sub_interval(&data->mem_db, start, size); start 883 drivers/pcmcia/rsrc_nonstatic.c static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) start 892 drivers/pcmcia/rsrc_nonstatic.c if (start < 0x100) start 893 drivers/pcmcia/rsrc_nonstatic.c start = 0x100; start 896 drivers/pcmcia/rsrc_nonstatic.c size = end - start + 1; start 898 drivers/pcmcia/rsrc_nonstatic.c if (end < start) start 906 drivers/pcmcia/rsrc_nonstatic.c if (add_interval(&data->io_db, start, size) != 0) { start 912 drivers/pcmcia/rsrc_nonstatic.c do_io_probe(s, start, size); start 916 drivers/pcmcia/rsrc_nonstatic.c sub_interval(&data->io_db, start, size); start 976 drivers/pcmcia/rsrc_nonstatic.c if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) start 991 drivers/pcmcia/rsrc_nonstatic.c if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) start 202 drivers/pcmcia/sa1111_generic.c if (!request_mem_region(dev->res.start, 512, SA1111_DRIVER_NAME(dev))) { start 234 drivers/pcmcia/sa1111_generic.c release_mem_region(dev->res.start, 512); start 253 drivers/pcmcia/sa1111_generic.c release_mem_region(dev->res.start, 512); start 175 drivers/pcmcia/sa11xx_base.c skt->res_skt.start = _PCMCIA(skt->nr); start 180 drivers/pcmcia/sa11xx_base.c skt->res_io.start = _PCMCIAIO(skt->nr); start 185 drivers/pcmcia/sa11xx_base.c skt->res_mem.start = _PCMCIAMem(skt->nr); start 190 drivers/pcmcia/sa11xx_base.c skt->res_attr.start = _PCMCIAAttr(skt->nr); start 568 drivers/pcmcia/soc_common.c map->map, map->speed, (unsigned long long)map->start, start 599 drivers/pcmcia/soc_common.c map->stop -= map->start; start 601 drivers/pcmcia/soc_common.c map->start = skt->socket.io_offset; start 656 drivers/pcmcia/soc_common.c map->static_start = res->start + map->card_start; start 821 drivers/pcmcia/soc_common.c skt->virt_io = ioremap(skt->res_io.start, 0x10000); start 702 
drivers/pcmcia/tcic.c (unsigned long long)io->start, (unsigned long long)io->stop); start 703 drivers/pcmcia/tcic.c if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || start 704 drivers/pcmcia/tcic.c (io->stop < io->start)) return -EINVAL; start 708 drivers/pcmcia/tcic.c base = io->start; len = io->stop - io->start; start 739 drivers/pcmcia/tcic.c mem->speed, (unsigned long long)mem->res->start, start 742 drivers/pcmcia/tcic.c (mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) || start 743 drivers/pcmcia/tcic.c (mem->res->start > mem->res->end) || (mem->speed > 1000)) start 748 drivers/pcmcia/tcic.c base = mem->res->start; len = mem->res->end - mem->res->start; start 757 drivers/pcmcia/tcic.c mmap = mem->card_start - mem->res->start; start 779 drivers/pcmcia/tcic.c struct resource res = { .start = 0, .end = 0x1000 }; start 106 drivers/pcmcia/vrc4171_card.c .start = CARD_CONTROLLER_START, start 110 drivers/pcmcia/vrc4171_card.c .start = INTERRUPT_STATUS, start 114 drivers/pcmcia/vrc4171_card.c .start = CONFIGURATION1, start 375 drivers/pcmcia/vrc4171_card.c io->start > 0xffff || io->stop > 0xffff || io->start > io->stop) start 387 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_IO(map)+I365_W_START, io->start); start 412 drivers/pcmcia/vrc4171_card.c uint16_t start, stop, offset; start 418 drivers/pcmcia/vrc4171_card.c mem->res->start < CARD_MEM_START || mem->res->start > CARD_MEM_END || start 420 drivers/pcmcia/vrc4171_card.c mem->res->start > mem->res->end || start 434 drivers/pcmcia/vrc4171_card.c start = (mem->res->start >> 12) & 0x3fff; start 436 drivers/pcmcia/vrc4171_card.c start |= I365_MEM_16BIT; start 437 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_MEM(map)+I365_W_START, start); start 258 drivers/pcmcia/vrc4173_cardu.c io->start = exca_readw(socket, IO_WIN_SA(map)); start 291 drivers/pcmcia/vrc4173_cardu.c exca_writew(socket, IO_WIN_SA(map), io->start); start 308 drivers/pcmcia/vrc4173_cardu.c uint32_t start, stop, offset, page; start 319 drivers/pcmcia/vrc4173_cardu.c start = exca_readw(socket, MEM_WIN_SA(map)); start 320 drivers/pcmcia/vrc4173_cardu.c mem->flags |= (start & MEM_WIN_DSIZE) ? 
MAP_16BIT : 0; start 321 drivers/pcmcia/vrc4173_cardu.c start = (start & 0x0fff) << 12; start 329 drivers/pcmcia/vrc4173_cardu.c offset = ((offset & 0x3fff) << 12) + start; start 333 drivers/pcmcia/vrc4173_cardu.c mem->sys_start = start + page; start 334 drivers/pcmcia/vrc4173_cardu.c mem->sys_stop = start + page; start 463 drivers/pcmcia/vrc4173_cardu.c unsigned long start, len, flags; start 476 drivers/pcmcia/vrc4173_cardu.c start = pci_resource_start(dev, 0); start 477 drivers/pcmcia/vrc4173_cardu.c if (start == 0) { start 500 drivers/pcmcia/vrc4173_cardu.c socket->base = ioremap(start, len); start 519 drivers/pcmcia/vrc4173_cardu.c printk(KERN_INFO "%s at %#08lx, IRQ %d\n", socket->name, start, dev->irq); start 181 drivers/pcmcia/xxs1500_ss.c map->start = (u32)sock->virt_io; start 182 drivers/pcmcia/xxs1500_ss.c map->stop = map->start + IO_MAP_SIZE; start 227 drivers/pcmcia/xxs1500_ss.c sock->phys_attr = r->start; start 235 drivers/pcmcia/xxs1500_ss.c sock->phys_mem = r->start; start 243 drivers/pcmcia/xxs1500_ss.c sock->phys_io = r->start; start 424 drivers/pcmcia/yenta_socket.c exca_writew(socket, I365_IO(map)+I365_W_START, io->start); start 447 drivers/pcmcia/yenta_socket.c unsigned int start, stop, card_start; start 453 drivers/pcmcia/yenta_socket.c start = region.start; start 457 drivers/pcmcia/yenta_socket.c if (map > 4 || start > stop || ((start ^ stop) >> 24) || start 468 drivers/pcmcia/yenta_socket.c exca_writeb(socket, CB_MEM_PAGE(map), start >> 24); start 470 drivers/pcmcia/yenta_socket.c word = (start >> 12) & 0x0fff; start 493 drivers/pcmcia/yenta_socket.c word = ((card_start - start) >> 12) & 0x3fff; start 551 drivers/pcmcia/yenta_socket.c struct resource res = { .start = 0, .end = 0x0fff }; start 632 drivers/pcmcia/yenta_socket.c u32 align, size, start, end; start 637 drivers/pcmcia/yenta_socket.c start = PCIBIOS_MIN_CARDBUS_IO; start 640 drivers/pcmcia/yenta_socket.c unsigned long avail = root->end - root->start; start 654 drivers/pcmcia/yenta_socket.c start = PCIBIOS_MIN_MEM; start 659 drivers/pcmcia/yenta_socket.c if (allocate_resource(root, res, size, start, end, align, start 711 drivers/pcmcia/yenta_socket.c region.start = config_readl(socket, addr_start) & mask; start 713 drivers/pcmcia/yenta_socket.c if (region.start && region.end > region.start && !override_bios) { start 745 drivers/pcmcia/yenta_socket.c res->start = res->end = res->flags = 0; start 777 drivers/pcmcia/yenta_socket.c if (res->start != 0 && res->end != 0) start 779 drivers/pcmcia/yenta_socket.c res->start = res->end = res->flags = 0; start 1039 drivers/pcmcia/yenta_socket.c config_writel(socket, PCI_BASE_ADDRESS_0, region.start); start 1052 drivers/pcmcia/yenta_socket.c ((unsigned int)dev->subordinate->busn_res.start << 8) | /* secondary bus */ start 1107 drivers/pcmcia/yenta_socket.c if (sibling->busn_res.start > bridge_to_fix->busn_res.end start 1108 drivers/pcmcia/yenta_socket.c && sibling->busn_res.start <= upper_limit) start 1109 drivers/pcmcia/yenta_socket.c upper_limit = sibling->busn_res.start - 1; start 1423 drivers/perf/arm-cci.c .start = cci_pmu_start, start 1278 drivers/perf/arm-ccn.c .start = arm_ccn_pmu_event_start, start 1488 drivers/perf/arm-ccn.c irq = res->start; start 724 drivers/perf/arm_dsu_pmu.c .start = dsu_pmu_start, start 458 drivers/perf/arm_pmu.c armpmu->start(armpmu); start 728 drivers/perf/arm_pmu.c armpmu->start(armpmu); start 804 drivers/perf/arm_pmu.c .start = armpmu_start, start 128 drivers/perf/arm_pmu_acpi.c spe_resources[0].start = irq; start 751 
drivers/perf/arm_smmuv3_pmu.c .start = smmu_pmu_event_start, start 795 drivers/perf/arm_smmuv3_pmu.c dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start); start 800 drivers/perf/arm_smmuv3_pmu.c (res_0->start) >> SMMU_PMCG_PA_SHIFT); start 802 drivers/perf/arm_smmuv3_pmu.c dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start); start 817 drivers/perf/arm_smmuv3_pmu.c err, &res_0->start); start 824 drivers/perf/arm_smmuv3_pmu.c err, &res_0->start); start 829 drivers/perf/arm_smmuv3_pmu.c &res_0->start, smmu_pmu->num_counters, start 903 drivers/perf/arm_spe_pmu.c .start = arm_spe_pmu_start, start 460 drivers/perf/fsl_imx8_ddr_perf.c .start = ddr_perf_event_start, start 390 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c .start = hisi_uncore_pmu_start, start 401 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c .start = hisi_uncore_pmu_start, start 391 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c .start = hisi_uncore_pmu_start, start 961 drivers/perf/qcom_l2_pmu.c .start = l2_cache_event_start, start 175 drivers/perf/qcom_l3_pmu.c void (*start)(struct perf_event *event); start 259 drivers/perf/qcom_l3_pmu.c .start = qcom_l3_cache__64bit_counter_start, start 331 drivers/perf/qcom_l3_pmu.c .start = qcom_l3_cache__32bit_counter_start, start 530 drivers/perf/qcom_l3_pmu.c ops->start(event); start 760 drivers/perf/qcom_l3_pmu.c .start = qcom_l3_cache__event_start, start 771 drivers/perf/qcom_l3_pmu.c dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start); start 785 drivers/perf/qcom_l3_pmu.c &memrc->start); start 563 drivers/perf/thunderx2_pmu.c .start = tx2_uncore_event_start, start 1117 drivers/perf/xgene_pmu.c .start = xgene_perf_start, start 149 drivers/phy/hisilicon/phy-hix5hd2-sata.c priv->base = devm_ioremap(dev, res->start, resource_size(res)); start 206 drivers/phy/marvell/phy-berlin-sata.c priv->base = devm_ioremap(dev, res->start, resource_size(res)); start 1017 drivers/phy/marvell/phy-mvebu-cp110-comphy.c priv->cp_phys = res->start; start 1130 drivers/phy/rockchip/phy-rockchip-typec.c if (phy_cfgs[index].reg == res->start) { start 876 drivers/phy/st/phy-miphy28lp.c *base = devm_ioremap(dev, res.start, resource_size(&res)); start 219 drivers/phy/ti/phy-dm816x-usb.c phy->usbphy_ctrl = (res->start & 0xff) + 4; start 501 drivers/pinctrl/actions/pinctrl-owl.c unsigned int start = 0, i; start 506 drivers/pinctrl/actions/pinctrl-owl.c if (*pin >= start && *pin < start + port->pins) { start 507 drivers/pinctrl/actions/pinctrl-owl.c *pin -= start; start 511 drivers/pinctrl/actions/pinctrl-owl.c start += port->pins; start 1053 drivers/pinctrl/bcm/pinctrl-ns2-mux.c pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, start 582 drivers/pinctrl/bcm/pinctrl-nsp-mux.c pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, start 614 drivers/pinctrl/freescale/pinctrl-imx1-core.c ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start, start 1554 drivers/pinctrl/intel/pinctrl-baytrail.c if (irq_rc && irq_rc->start) { start 1564 drivers/pinctrl/intel/pinctrl-baytrail.c (unsigned)irq_rc->start, start 201 drivers/pinctrl/intel/pinctrl-cherryview.c #define GPIO_PINRANGE(start, end) \ start 203 drivers/pinctrl/intel/pinctrl-cherryview.c .base = (start), \ start 204 drivers/pinctrl/intel/pinctrl-cherryview.c .npins = (end) - (start) + 1, \ start 161 drivers/pinctrl/mediatek/pinctrl-mtk-common.c if (pin >= ies_smt_infos[i].start && start 151 drivers/pinctrl/mediatek/pinctrl-mtk-common.h unsigned short start; start 159 drivers/pinctrl/mediatek/pinctrl-mtk-common.h .start = _start, \ 
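
Many of the drivers/perf entries above are simply ".start = <driver>_event_start" initializers in a struct pmu. A skeletal sketch of that callback shape, assuming the interface in include/linux/perf_event.h (the demo_* names are invented, and a real driver also reprograms its hardware counter here):

    #include <linux/perf_event.h>

    /* pmu->start callback: mark the event as running again (clears
     * PERF_HES_STOPPED) before the hardware counter is restarted. */
    static void demo_pmu_event_start(struct perf_event *event, int flags)
    {
            event->hw.state = 0;
            /* a real driver restores the counter from event->hw here */
    }

    static struct pmu demo_pmu = {
            .task_ctx_nr = perf_invalid_context,
            .start       = demo_pmu_event_start,
            /* .event_init, .add, .del, .stop, .read omitted from this sketch */
    };
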
start 803 drivers/pinctrl/mvebu/pinctrl-dove.c fb_res.start = 0; start 809 drivers/pinctrl/mvebu/pinctrl-dove.c (mpp_res->start & INT_REGS_MASK) + MPP4_REGS_OFFS, 0x4); start 821 drivers/pinctrl/mvebu/pinctrl-dove.c (mpp_res->start & INT_REGS_MASK) + PMU_REGS_OFFS, 0x8); start 835 drivers/pinctrl/mvebu/pinctrl-dove.c (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14); start 846 drivers/pinctrl/mvebu/pinctrl-dove.c if (fb_res.start) start 224 drivers/pinctrl/nomadik/pinctrl-abx500.c if (gpio >= cluster->start && gpio <= cluster->end) { start 231 drivers/pinctrl/nomadik/pinctrl-abx500.c hwirq = gpio - cluster->start + cluster->to_irq; start 99 drivers/pinctrl/nomadik/pinctrl-abx500.h .start = a, \ start 116 drivers/pinctrl/nomadik/pinctrl-abx500.h int start; start 1880 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c ioremap(res.start, resource_size(&res)); start 869 drivers/pinctrl/pinctrl-amd.c gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start, start 390 drivers/pinctrl/pinctrl-artpec6.c unsigned int start; start 412 drivers/pinctrl/pinctrl-artpec6.c return (pin - pin_register[i].start) * 4 + start 1059 drivers/pinctrl/pinctrl-at91-pio4.c atmel_pioctrl->irqs[i] = res->start; start 1060 drivers/pinctrl/pinctrl-at91-pio4.c irq_set_chained_handler(res->start, atmel_gpio_irq_handler); start 1061 drivers/pinctrl/pinctrl-at91-pio4.c irq_set_handler_data(res->start, atmel_pioctrl); start 869 drivers/pinctrl/pinctrl-rzn1.c ipctl->lev1_protect_phys = (u32)res->start + 0x400; start 875 drivers/pinctrl/pinctrl-rzn1.c ipctl->lev2_protect_phys = (u32)res->start + 0x400; start 284 drivers/pinctrl/pinctrl-single.c pa = pcs->res->start + offset; start 677 drivers/pinctrl/pinctrl-single.c (unsigned long)pcs->res->start + offset, val); start 1817 drivers/pinctrl/pinctrl-single.c pcs->res = devm_request_mem_region(pcs->dev, res->start, start 1825 drivers/pinctrl/pinctrl-single.c pcs->base = devm_ioremap(pcs->dev, pcs->res->start, pcs->size); start 1530 drivers/pinctrl/pinctrl-st.c gpio_irq = irq_res.start; start 1180 drivers/pinctrl/pinctrl-zynq.c pctrl->pctrl_offset = res->start; start 1040 drivers/pinctrl/samsung/pinctrl-samsung.c virt_base[i] = devm_ioremap(&pdev->dev, res->start, start 1114 drivers/pinctrl/samsung/pinctrl-samsung.c drvdata->irq = res->start; start 74 drivers/pinctrl/sh-pfc/core.c windows->phys = res->start; start 119 drivers/pinctrl/sh-pfc/core.c return pin >= range->start start 120 drivers/pinctrl/sh-pfc/core.c ? 
offset + pin - range->start : -1; start 122 drivers/pinctrl/sh-pfc/core.c offset += range->end - range->start + 1; start 423 drivers/pinctrl/sh-pfc/core.c pfc->ranges->start = 0; start 446 drivers/pinctrl/sh-pfc/core.c range->start = pfc->info->pins[0].pin; start 457 drivers/pinctrl/sh-pfc/core.c range->start = pfc->info->pins[i].pin; start 15 drivers/pinctrl/sh-pfc/core.h u16 start; start 378 drivers/pinctrl/sh-pfc/gpio.c if (range->start >= pfc->nr_gpio_pins) start 382 drivers/pinctrl/sh-pfc/gpio.c dev_name(pfc->dev), range->start, range->start, start 383 drivers/pinctrl/sh-pfc/gpio.c range->end - range->start + 1); start 856 drivers/pinctrl/ti/pinctrl-ti-iodelay.c iod->phys_base = res->start; start 874 drivers/pinctrl/ti/pinctrl-ti-iodelay.c ret = ti_iodelay_alloc_pins(dev, iod, res->start); start 94 drivers/platform/chrome/chromeos_pstore.c if (!res->start || !len) start 100 drivers/platform/chrome/chromeos_pstore.c chromeos_ramoops_data.mem_address = res->start; start 37 drivers/platform/chrome/wilco_ec/core.c return devm_request_region(dev, res->start, resource_size(res), start 68 drivers/platform/chrome/wilco_ec/core.c cros_ec_lpc_mec_init(ec->io_packet->start, start 69 drivers/platform/chrome/wilco_ec/core.c ec->io_packet->start + EC_MAILBOX_DATA_SIZE); start 58 drivers/platform/chrome/wilco_ec/mailbox.c if (!(inb(ec->io_command->start) & start 129 drivers/platform/chrome/wilco_ec/mailbox.c outb(EC_MAILBOX_START_COMMAND, ec->io_command->start); start 144 drivers/platform/chrome/wilco_ec/mailbox.c flag = inb(ec->io_data->start); start 910 drivers/platform/goldfish/goldfish_pipe.c dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); start 920 drivers/platform/goldfish/goldfish_pipe.c dev->irq = r->start; start 602 drivers/platform/x86/acerhdf.c static int str_starts_with(const char *str, const char *start) start 607 drivers/platform/x86/acerhdf.c start_len = strlen(start); start 610 drivers/platform/x86/acerhdf.c !strncmp(str, start, start_len)) start 627 drivers/platform/x86/apple-gmux.c gmux_data->iostart = res->start; start 628 drivers/platform/x86/apple-gmux.c gmux_data->iolen = res->end - res->start; start 713 drivers/platform/x86/intel_pmc_ipc.c res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET; start 714 drivers/platform/x86/intel_pmc_ipc.c res->end = res->start + TCO_REGS_SIZE - 1; start 717 drivers/platform/x86/intel_pmc_ipc.c res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET; start 718 drivers/platform/x86/intel_pmc_ipc.c res->end = res->start + SMI_EN_SIZE - 1; start 742 drivers/platform/x86/intel_pmc_ipc.c res->start = ipcdev.telem_punit_ssram_base; start 743 drivers/platform/x86/intel_pmc_ipc.c res->end = res->start + ipcdev.telem_punit_ssram_size - 1; start 746 drivers/platform/x86/intel_pmc_ipc.c res->start = ipcdev.telem_pmc_ssram_base; start 747 drivers/platform/x86/intel_pmc_ipc.c res->end = res->start + ipcdev.telem_pmc_ssram_size - 1; start 804 drivers/platform/x86/intel_pmc_ipc.c ipcdev.acpi_io_base = res->start; start 869 drivers/platform/x86/intel_pmc_ipc.c res->end = res->start + size - 1; start 887 drivers/platform/x86/intel_pmc_ipc.c ipcdev.telem_punit_ssram_base = res->start + start 890 drivers/platform/x86/intel_pmc_ipc.c ipcdev.telem_pmc_ssram_base = res->start + start 1136 drivers/platform/x86/intel_telemetry_pltdrv.c if (!devm_request_mem_region(&pdev->dev, res0->start, size, start 1141 drivers/platform/x86/intel_telemetry_pltdrv.c telm_conf->pss_config.ssram_base_addr = res0->start; start 1150 drivers/platform/x86/intel_telemetry_pltdrv.c if 
(!devm_request_mem_region(&pdev->dev, res1->start, size, start 1156 drivers/platform/x86/intel_telemetry_pltdrv.c telm_conf->ioss_config.ssram_base_addr = res1->start; start 1192 drivers/platform/x86/intel_telemetry_pltdrv.c release_mem_region(res0->start, resource_size(res0)); start 1194 drivers/platform/x86/intel_telemetry_pltdrv.c release_mem_region(res1->start, resource_size(res1)); start 2100 drivers/platform/x86/mlx-platform.c mlxplat_lpc_resources[1].start, 1); start 921 drivers/platform/x86/thinkpad_acpi.c char *start = *cmds; start 924 drivers/platform/x86/thinkpad_acpi.c while ((end = strchr(start, ',')) && end == start) start 925 drivers/platform/x86/thinkpad_acpi.c start = end + 1; start 932 drivers/platform/x86/thinkpad_acpi.c return start; start 170 drivers/pnp/base.h resource_size_t start, start 173 drivers/pnp/base.h resource_size_t start, start 176 drivers/pnp/base.h resource_size_t start, start 284 drivers/pnp/interface.c (unsigned long long) res->start, start 292 drivers/pnp/interface.c (unsigned long long) res->start); start 304 drivers/pnp/interface.c resource_size_t *start, start 308 drivers/pnp/interface.c if (start) start 309 drivers/pnp/interface.c *start = 0; start 318 drivers/pnp/interface.c if (start) { start 319 drivers/pnp/interface.c *start = simple_strtoull(buf, &buf, 0); start 326 drivers/pnp/interface.c *end = *start; start 385 drivers/pnp/interface.c resource_size_t start; start 399 drivers/pnp/interface.c &start, &end, start 401 drivers/pnp/interface.c pnp_add_io_resource(dev, start, end, flags); start 405 drivers/pnp/interface.c &start, &end, start 407 drivers/pnp/interface.c pnp_add_mem_resource(dev, start, end, flags); start 411 drivers/pnp/interface.c &start, NULL, start 413 drivers/pnp/interface.c pnp_add_irq_resource(dev, start, flags); start 417 drivers/pnp/interface.c &start, NULL, start 419 drivers/pnp/interface.c pnp_add_dma_resource(dev, start, flags); start 423 drivers/pnp/interface.c &start, &end, start 425 drivers/pnp/interface.c pnp_add_bus_resource(dev, start, end); start 920 drivers/pnp/isapnp/core.c tmp, (unsigned long long) res->start); start 922 drivers/pnp/isapnp/core.c res->start); start 928 drivers/pnp/isapnp/core.c int irq = res->start; start 939 drivers/pnp/isapnp/core.c tmp, (unsigned long long) res->start); start 940 drivers/pnp/isapnp/core.c isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start); start 947 drivers/pnp/isapnp/core.c tmp, (unsigned long long) res->start); start 949 drivers/pnp/isapnp/core.c (res->start >> 8) & 0xffff); start 45 drivers/pnp/manager.c "flags %#lx\n", idx, (unsigned long long) res->start, start 52 drivers/pnp/manager.c res->start = 0; start 61 drivers/pnp/manager.c res->start = rule->min; start 62 drivers/pnp/manager.c res->end = res->start + rule->size - 1; start 65 drivers/pnp/manager.c res->start += rule->align; start 66 drivers/pnp/manager.c res->end = res->start + rule->size - 1; start 67 drivers/pnp/manager.c if (res->start > rule->max || !rule->align) { start 77 drivers/pnp/manager.c pnp_add_io_resource(dev, res->start, res->end, res->flags); start 88 drivers/pnp/manager.c "flags %#lx\n", idx, (unsigned long long) res->start, start 95 drivers/pnp/manager.c res->start = 0; start 112 drivers/pnp/manager.c res->start = rule->min; start 113 drivers/pnp/manager.c res->end = res->start + rule->size - 1; start 116 drivers/pnp/manager.c res->start += rule->align; start 117 drivers/pnp/manager.c res->end = res->start + rule->size - 1; start 118 drivers/pnp/manager.c if (res->start > rule->max || 
!rule->align) { start 128 drivers/pnp/manager.c pnp_add_mem_resource(dev, res->start, res->end, res->flags); start 145 drivers/pnp/manager.c idx, (int) res->start, res->flags); start 151 drivers/pnp/manager.c res->start = -1; start 161 drivers/pnp/manager.c res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16); start 162 drivers/pnp/manager.c if (res->start < PNP_IRQ_NR) { start 163 drivers/pnp/manager.c res->end = res->start; start 168 drivers/pnp/manager.c res->start = res->end = xtab[i]; start 175 drivers/pnp/manager.c res->start = -1; start 186 drivers/pnp/manager.c pnp_add_irq_resource(dev, res->start, res->flags); start 204 drivers/pnp/manager.c idx, (int) res->start, res->flags); start 210 drivers/pnp/manager.c res->start = -1; start 221 drivers/pnp/manager.c res->start = res->end = xtab[i]; start 231 drivers/pnp/manager.c pnp_add_dma_resource(dev, res->start, res->flags); start 104 drivers/pnp/pnpacpi/rsparser.c pcibios_penalize_isa_irq(r->start, 1); start 155 drivers/pnp/pnpacpi/rsparser.c u64 start, length; start 157 drivers/pnp/pnpacpi/rsparser.c memcpy(&start, vendor->byte_data, sizeof(start)); start 160 drivers/pnp/pnpacpi/rsparser.c pnp_add_mem_resource(dev, start, start + length - 1, 0); start 677 drivers/pnp/pnpacpi/rsparser.c irq->interrupts[0] = p->start; start 680 drivers/pnp/pnpacpi/rsparser.c (int) p->start, start 707 drivers/pnp/pnpacpi/rsparser.c extended_irq->interrupts[0] = p->start; start 709 drivers/pnp/pnpacpi/rsparser.c pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start, start 756 drivers/pnp/pnpacpi/rsparser.c dma->channels[0] = p->start; start 760 drivers/pnp/pnpacpi/rsparser.c (int) p->start, dma->type, dma->transfer, dma->bus_master); start 773 drivers/pnp/pnpacpi/rsparser.c io->minimum = p->start; start 793 drivers/pnp/pnpacpi/rsparser.c fixed_io->address = p->start; start 814 drivers/pnp/pnpacpi/rsparser.c memory24->minimum = p->start; start 838 drivers/pnp/pnpacpi/rsparser.c memory32->minimum = p->start; start 863 drivers/pnp/pnpacpi/rsparser.c fixed_memory32->address = p->start; start 57 drivers/pnp/pnpbios/rsparser.c int start, int len) start 60 drivers/pnp/pnpbios/rsparser.c int end = start + len - 1; start 65 drivers/pnp/pnpbios/rsparser.c pnp_add_io_resource(dev, start, end, flags); start 69 drivers/pnp/pnpbios/rsparser.c int start, int len) start 72 drivers/pnp/pnpbios/rsparser.c int end = start + len - 1; start 77 drivers/pnp/pnpbios/rsparser.c pnp_add_mem_resource(dev, start, end, flags); start 508 drivers/pnp/pnpbios/rsparser.c base = res->start; start 532 drivers/pnp/pnpbios/rsparser.c base = res->start; start 562 drivers/pnp/pnpbios/rsparser.c base = res->start; start 588 drivers/pnp/pnpbios/rsparser.c map = 1 << res->start; start 604 drivers/pnp/pnpbios/rsparser.c map = 1 << res->start; start 620 drivers/pnp/pnpbios/rsparser.c base = res->start; start 639 drivers/pnp/pnpbios/rsparser.c unsigned long base = res->start; start 643 drivers/pnp/pnpbios/rsparser.c base = res->start; start 263 drivers/pnp/quirks.c if (res->start == 0 && res->end == 0) start 266 drivers/pnp/quirks.c pnp_start = res->start; start 308 drivers/pnp/quirks.c resource_size_t start, end; start 319 drivers/pnp/quirks.c if (res->end < mmconfig->start || res->start > mmconfig->end || start 320 drivers/pnp/quirks.c (res->start == mmconfig->start && res->end == mmconfig->end)) start 326 drivers/pnp/quirks.c if (mmconfig->start < res->start) { start 327 drivers/pnp/quirks.c start = mmconfig->start; start 328 drivers/pnp/quirks.c end = res->start - 1; start 329 
drivers/pnp/quirks.c pnp_add_mem_resource(dev, start, end, 0); start 332 drivers/pnp/quirks.c start = res->end + 1; start 334 drivers/pnp/quirks.c pnp_add_mem_resource(dev, start, end, 0); start 391 drivers/pnp/quirks.c region.start = addr_lo & ~0x7fff; start 393 drivers/pnp/quirks.c region.start |= (u64) addr_hi << 32; start 394 drivers/pnp/quirks.c region.end = region.start + 32*1024 - 1; start 402 drivers/pnp/quirks.c if (res->end < mch.start || res->start > mch.end) start 404 drivers/pnp/quirks.c if (res->start == mch.start && res->end == mch.end) start 409 drivers/pnp/quirks.c res->start = mch.start; start 157 drivers/pnp/resource.c #define length(start, end) (*(end) - *(start) + 1) start 173 drivers/pnp/resource.c port = &res->start; start 199 drivers/pnp/resource.c tport = &tres->start; start 218 drivers/pnp/resource.c tport = &tres->start; start 236 drivers/pnp/resource.c addr = &res->start; start 262 drivers/pnp/resource.c taddr = &tres->start; start 281 drivers/pnp/resource.c taddr = &tres->start; start 359 drivers/pnp/resource.c irq = &res->start; start 378 drivers/pnp/resource.c if (tres->start == *irq) start 406 drivers/pnp/resource.c if (tres->start == *irq) start 423 drivers/pnp/resource.c dma = &res->start; start 442 drivers/pnp/resource.c if (tres->start == *dma) start 465 drivers/pnp/resource.c if (tres->start == *dma) start 540 drivers/pnp/resource.c res->start = irq; start 561 drivers/pnp/resource.c res->start = dma; start 569 drivers/pnp/resource.c resource_size_t start, start 578 drivers/pnp/resource.c (unsigned long long) start, start 585 drivers/pnp/resource.c res->start = start; start 593 drivers/pnp/resource.c resource_size_t start, start 602 drivers/pnp/resource.c (unsigned long long) start, start 609 drivers/pnp/resource.c res->start = start; start 617 drivers/pnp/resource.c resource_size_t start, start 626 drivers/pnp/resource.c (unsigned long long) start, start 633 drivers/pnp/resource.c res->start = start; start 644 drivers/pnp/resource.c int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start, start 660 drivers/pnp/resource.c if (port->min == start && port->size == size) start 665 drivers/pnp/resource.c if (mem->min == start && mem->size == size) start 670 drivers/pnp/resource.c if (start < PNP_IRQ_NR && start 671 drivers/pnp/resource.c test_bit(start, irq->map.bits)) start 676 drivers/pnp/resource.c if (dma->map & (1 << start)) start 686 drivers/pnp/resource.c int pnp_range_reserved(resource_size_t start, resource_size_t end) start 694 drivers/pnp/resource.c dev_start = &pnp_res->res.start; start 696 drivers/pnp/resource.c if (ranged_conflict(&start, &end, dev_start, dev_end)) start 30 drivers/pnp/system.c resource_size_t start = r->start, end = r->end; start 39 drivers/pnp/system.c res = request_region(start, end - start + 1, regionid); start 41 drivers/pnp/system.c res = request_mem_region(start, end - start + 1, regionid); start 64 drivers/pnp/system.c if (res->start == 0) start 66 drivers/pnp/system.c if (res->start < 0x100) start 76 drivers/pnp/system.c if (res->end < res->start) start 877 drivers/power/avs/smartreflex.c sr_info->irq = irq->start; start 90 drivers/power/reset/qnap-poweroff.c base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 265 drivers/power/supply/ds2760_battery.c int ret, i, start, count, scale[5]; start 274 drivers/power/supply/ds2760_battery.c start = 0; start 277 drivers/power/supply/ds2760_battery.c start = DS2760_VOLTAGE_MSB; start 278 drivers/power/supply/ds2760_battery.c count = 
DS2760_TEMP_LSB - start + 1; start 281 drivers/power/supply/ds2760_battery.c ret = w1_ds2760_read(di->dev, di->raw + start, start, count); start 217 drivers/power/supply/goldfish_battery.c data->reg_base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 491 drivers/power/supply/lp8788-charger.c irq_start = r->start; start 339 drivers/power/supply/pda_power.c ret = request_irq(ac_irq->start, power_changed_isr, start 363 drivers/power/supply/pda_power.c ret = request_irq(usb_irq->start, power_changed_isr, start 403 drivers/power/supply/pda_power.c free_irq(usb_irq->start, pda_psy_usb); start 410 drivers/power/supply/pda_power.c free_irq(ac_irq->start, pda_psy_ac); start 433 drivers/power/supply/pda_power.c free_irq(usb_irq->start, pda_psy_usb); start 435 drivers/power/supply/pda_power.c free_irq(ac_irq->start, pda_psy_ac); start 475 drivers/power/supply/pda_power.c ac_wakeup_enabled = !enable_irq_wake(ac_irq->start); start 477 drivers/power/supply/pda_power.c usb_wakeup_enabled = !enable_irq_wake(usb_irq->start); start 487 drivers/power/supply/pda_power.c disable_irq_wake(usb_irq->start); start 489 drivers/power/supply/pda_power.c disable_irq_wake(ac_irq->start); start 122 drivers/ps3/ps3stor_lib.c dev->region_idx, dev->regions[dev->region_idx].start, start 592 drivers/ptp/ptp_qoriq.c base = ioremap(ptp_qoriq->rsrc->start, start 106 drivers/ptp/ptp_sysfs.c &req.perout.start.sec, &req.perout.start.nsec, start 40 drivers/pwm/core.c unsigned int start; start 48 drivers/pwm/core.c start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from, start 51 drivers/pwm/core.c if (pwm >= 0 && start != pwm) start 54 drivers/pwm/core.c if (start + count > MAX_PWMS) start 57 drivers/pwm/core.c return start; start 1203 drivers/pwm/core.c .start = pwm_seq_start, start 67 drivers/pwm/pwm-omap-dmtimer.c omap->pdata->start(omap->dm_timer); start 277 drivers/pwm/pwm-omap-dmtimer.c !pdata->start || start 127 drivers/pwm/pwm-renesas-tpu.c static void tpu_pwm_start_stop(struct tpu_pwm_device *pwm, int start) start 135 drivers/pwm/pwm-renesas-tpu.c if (start) start 414 drivers/rapidio/devices/tsi721.c if ((dbell->res->start <= DBELL_INF(idb.bytes)) && start 39 drivers/rapidio/rio-scan.c u16 start; /* logical minimal id */ start 66 drivers/rapidio/rio-scan.c destid += idtab->start; start 87 drivers/rapidio/rio-scan.c destid -= idtab->start; start 105 drivers/rapidio/rio-scan.c destid -= idtab->start; start 125 drivers/rapidio/rio-scan.c destid += idtab->start; start 145 drivers/rapidio/rio-scan.c destid += idtab->start; start 836 drivers/rapidio/rio-scan.c int do_enum, u16 start) start 860 drivers/rapidio/rio-scan.c idtab->start = start; start 435 drivers/rapidio/rio.c u16 start, start 444 drivers/rapidio/rio.c rio_init_dbell_res(res, start, end); start 474 drivers/rapidio/rio.c int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) start 481 drivers/rapidio/rio.c if ((dbell->res->start == start) && (dbell->res->end == end)) { start 515 drivers/rapidio/rio.c struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start, start 521 drivers/rapidio/rio.c rio_init_dbell_res(res, start, end); start 1284 drivers/rapidio/rio_cm.c int start, end; start 1293 drivers/rapidio/rio_cm.c start = ch_num; start 1297 drivers/rapidio/rio_cm.c start = chstart; start 1303 drivers/rapidio/rio_cm.c id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT); start 330 drivers/regulator/88pm8607.c if (info->desc.vsel_reg == res->start) start 335 drivers/regulator/88pm8607.c (unsigned long 
long)res->start); start 235 drivers/regulator/max8925-regulator.c if (ri->vol_reg == res->start) start 241 drivers/regulator/max8925-regulator.c (unsigned long long)res->start); start 195 drivers/regulator/pbias-regulator.c offset = res->start; start 751 drivers/regulator/ti-abb-regulator.c abb->int_base = devm_ioremap_nocache(dev, res->start, start 771 drivers/regulator/ti-abb-regulator.c abb->efuse_base = devm_ioremap_nocache(dev, res->start, start 422 drivers/regulator/wm831x-dcdc.c dcdc->base = res->start; start 580 drivers/regulator/wm831x-dcdc.c dcdc->base = res->start; start 707 drivers/regulator/wm831x-dcdc.c dcdc->base = res->start; start 140 drivers/regulator/wm831x-isink.c isink->reg = res->start; start 246 drivers/regulator/wm831x-ldo.c ldo->base = res->start; start 456 drivers/regulator/wm831x-ldo.c ldo->base = res->start; start 601 drivers/regulator/wm831x-ldo.c ldo->base = res->start; start 571 drivers/regulator/wm8350-regulator.c int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start, start 578 drivers/regulator/wm8350-regulator.c __func__, dcdc, start, stop); start 581 drivers/regulator/wm8350-regulator.c if (start > 15 || stop > 15) start 611 drivers/regulator/wm8350-regulator.c val | (start << WM8350_DC1_ENSLOT_SHIFT) | start 619 drivers/regulator/wm8350-regulator.c int wm8350_ldo_set_slot(struct wm8350 *wm8350, int ldo, u16 start, u16 stop) start 625 drivers/regulator/wm8350-regulator.c __func__, ldo, start, stop); start 628 drivers/regulator/wm8350-regulator.c if (start > 15 || stop > 15) start 649 drivers/regulator/wm8350-regulator.c wm8350_reg_write(wm8350, slot_reg, val | ((start << 10) | (stop << 6))); start 193 drivers/remoteproc/da8xx_remoteproc.c .start = da8xx_rproc_start, start 221 drivers/remoteproc/da8xx_remoteproc.c drproc->mem[i].bus_addr = res->start; start 223 drivers/remoteproc/da8xx_remoteproc.c res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK; start 244 drivers/remoteproc/imx_rproc.c .start = imx_rproc_start, start 304 drivers/remoteproc/imx_rproc.c priv->mem[b].sys_addr = res.start; start 289 drivers/remoteproc/keystone_remoteproc.c .start = keystone_rproc_start, start 319 drivers/remoteproc/keystone_remoteproc.c ksproc->mem[i].bus_addr = res->start; start 321 drivers/remoteproc/keystone_remoteproc.c res->start & KEYSTONE_RPROC_LOCAL_ADDRESS_MASK; start 173 drivers/remoteproc/omap_remoteproc.c .start = omap_rproc_start, start 59 drivers/remoteproc/qcom_common.c glink->subdev.start = glink_subdev_start; start 155 drivers/remoteproc/qcom_common.c smd->subdev.start = smd_subdev_start; start 286 drivers/remoteproc/qcom_q6v5_adsp.c .start = adsp_start, start 352 drivers/remoteproc/qcom_q6v5_adsp.c adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start, start 396 drivers/remoteproc/qcom_q6v5_adsp.c adsp->mem_phys = adsp->mem_reloc = r.start; start 402 drivers/remoteproc/qcom_q6v5_adsp.c &r.start, adsp->mem_size); start 1224 drivers/remoteproc/qcom_q6v5_mss.c .start = q6v5_start, start 1376 drivers/remoteproc/qcom_q6v5_mss.c qproc->mba_phys = r.start; start 1381 drivers/remoteproc/qcom_q6v5_mss.c &r.start, qproc->mba_size); start 1394 drivers/remoteproc/qcom_q6v5_mss.c qproc->mpss_phys = qproc->mpss_reloc = r.start; start 1399 drivers/remoteproc/qcom_q6v5_mss.c &r.start, qproc->mpss_size); start 175 drivers/remoteproc/qcom_q6v5_pas.c .start = adsp_start, start 236 drivers/remoteproc/qcom_q6v5_pas.c adsp->mem_phys = adsp->mem_reloc = r.start; start 241 drivers/remoteproc/qcom_q6v5_pas.c &r.start, adsp->mem_size); start 431 
drivers/remoteproc/qcom_q6v5_wcss.c .start = q6v5_wcss_start, start 546 drivers/remoteproc/qcom_sysmon.c sysmon->subdev.start = sysmon_start; start 303 drivers/remoteproc/qcom_wcnss.c .start = wcnss_start, start 446 drivers/remoteproc/qcom_wcnss.c wcnss->mem_phys = wcnss->mem_reloc = r.start; start 451 drivers/remoteproc/qcom_wcnss.c &r.start, wcnss->mem_size); start 555 drivers/remoteproc/remoteproc_core.c rvdev->subdev.start = rproc_vdev_do_start; start 1123 drivers/remoteproc/remoteproc_core.c if (subdev->start) { start 1124 drivers/remoteproc/remoteproc_core.c ret = subdev->start(subdev); start 1332 drivers/remoteproc/remoteproc_core.c ret = rproc->ops->start(rproc); start 231 drivers/remoteproc/st_remoteproc.c .start = st_rproc_start, start 201 drivers/remoteproc/st_slim_rproc.c .start = slim_rproc_start, start 259 drivers/remoteproc/st_slim_rproc.c slim_rproc->mem[i].bus_addr = res->start; start 471 drivers/remoteproc/stm32_rproc.c .start = stm32_rproc_start, start 107 drivers/remoteproc/wkup_m3_rproc.c .start = wkup_m3_rproc_start, start 178 drivers/remoteproc/wkup_m3_rproc.c wkupm3->mem[i].bus_addr = res->start; start 40 drivers/reset/reset-socfpga.c if (!request_mem_region(res.start, size, np->name)) { start 45 drivers/reset/reset-socfpga.c data->membase = ioremap(res.start, size); start 40 drivers/reset/reset-sunxi.c if (!request_mem_region(res.start, size, np->name)) { start 45 drivers/reset/reset-sunxi.c data->membase = ioremap(res.start, size); start 112 drivers/reset/reset-zynq.c priv->offset = res->start; start 280 drivers/rpmsg/qcom_glink_rpm.c msg_ram = devm_ioremap(dev, r.start, resource_size(&r)); start 649 drivers/rtc/rtc-abx80x.c .start = abx80x_wdog_start, start 384 drivers/rtc/rtc-at91rm9200.c at91_rtc_regs = devm_ioremap(&pdev->dev, regs->start, start 160 drivers/rtc/rtc-bq4802.c p->ioport = p->r->start; start 164 drivers/rtc/rtc-bq4802.c p->regs = devm_ioremap(&pdev->dev, p->r->start, start 735 drivers/rtc/rtc-cmos.c ports = request_region(ports->start, resource_size(ports), start 738 drivers/rtc/rtc-cmos.c ports = request_mem_region(ports->start, resource_size(ports), start 763 drivers/rtc/rtc-cmos.c if (can_bank2 && ports->end > (ports->start + 1)) start 893 drivers/rtc/rtc-cmos.c release_region(ports->start, resource_size(ports)); start 895 drivers/rtc/rtc-cmos.c release_mem_region(ports->start, resource_size(ports)); start 924 drivers/rtc/rtc-cmos.c release_region(ports->start, resource_size(ports)); start 926 drivers/rtc/rtc-cmos.c release_mem_region(ports->start, resource_size(ports)); start 1081 drivers/rtc/rtc-ds1685.c if (!devm_request_mem_region(&pdev->dev, res->start, rtc->size, start 1089 drivers/rtc/rtc-ds1685.c rtc->baseaddr = res->start; start 1090 drivers/rtc/rtc-ds1685.c rtc->regs = devm_ioremap(&pdev->dev, res->start, rtc->size); start 148 drivers/rtc/rtc-ftrtc010.c rtc->rtc_irq = res->start; start 154 drivers/rtc/rtc-ftrtc010.c rtc->rtc_base = devm_ioremap(dev, res->start, start 271 drivers/rtc/rtc-lp8788.c irq = r->start; start 157 drivers/rtc/rtc-m48t35.c if (!devm_request_mem_region(&pdev->dev, res->start, priv->size, start 161 drivers/rtc/rtc-m48t35.c priv->baseaddr = res->start; start 421 drivers/rtc/rtc-m48t59.c m48t59->ioaddr = devm_ioremap(&pdev->dev, res->start, start 326 drivers/rtc/rtc-mrst.c iomem = devm_request_mem_region(dev, iomem->start, resource_size(iomem), start 215 drivers/rtc/rtc-msm6242.c priv->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 334 drivers/rtc/rtc-mt6397.c rtc->addr_base = res->start; 
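The rtc, regulator and remoteproc entries above keep repeating one platform-driver idiom: fetch a struct resource from the platform device, hand res->start together with resource_size(res) to devm_ioremap(), and take an interrupt number from the start field of an IORESOURCE_IRQ resource. The sketch below only illustrates that recurring idiom; the foo_* names and the trivial handler are invented for the example, while platform_get_resource(), devm_ioremap(), resource_size() and devm_request_irq() are the real kernel APIs visible in the entries.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *mem, *irq;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	/* resource_size() is end - start + 1, so this maps the whole window */
	base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!base)
		return -ENOMEM;

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq)
		return -ENODEV;

	/* the interrupt number lives in the start field of the IRQ resource */
	return devm_request_irq(&pdev->dev, irq->start, foo_irq, 0,
				dev_name(&pdev->dev), base);
}

Because resource_size() is defined as end - start + 1, the pair (res->start, resource_size(res)) shows up together in nearly all of the ioremap and request_mem_region call sites listed above.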
start 319 drivers/rtc/rtc-pcf2127.c .start = pcf2127_wdt_start, start 186 drivers/rtc/rtc-pcf8523.c u8 start = REG_SECONDS, regs[7]; start 201 drivers/rtc/rtc-pcf8523.c msgs[0].buf = &start; start 119 drivers/rtc/rtc-pl030.c rtc->base = ioremap(dev->res.start, resource_size(&dev->res)); start 336 drivers/rtc/rtc-pl031.c ldata->base = devm_ioremap(&adev->dev, adev->res.start, start 224 drivers/rtc/rtc-puv3.c puv3_rtc_mem = request_mem_region(res->start, resource_size(res), start 333 drivers/rtc/rtc-pxa.c pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start, start 241 drivers/rtc/rtc-rp5c01.c priv->regs = devm_ioremap(&dev->dev, res->start, resource_size(res)); start 502 drivers/rtc/rtc-sh.c rtc->res = devm_request_mem_region(&pdev->dev, res->start, start 507 drivers/rtc/rtc-sh.c rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start, start 265 drivers/rtc/rtc-stmp3xxx.c rtc_data->io = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 72 drivers/rtc/rtc-v3020.c chip->ioaddress = ioremap(pdev->resource[0].start, 1); start 328 drivers/rtc/rtc-v3020.c (unsigned long long)pdev->resource[0].start, start 263 drivers/rtc/rtc-vr41xx.c rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 273 drivers/rtc/rtc-vr41xx.c rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 3384 drivers/s390/block/dasd.c geo->start = get_start_sect(bdev) >> base->block->s2b_shift; start 3231 drivers/s390/block/dasd_eckd.c static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start, start 3237 drivers/s390/block/dasd_eckd.c head = fmt_buffer[start].head; start 3248 drivers/s390/block/dasd_eckd.c for (i = start; i < max; i++) { start 3249 drivers/s390/block/dasd_eckd.c if (i > start) { start 3258 drivers/s390/block/dasd_eckd.c return i - start; start 128 drivers/s390/block/dasd_proc.c .start = dasd_devices_start, start 73 drivers/s390/block/dcssblk.c unsigned long start; start 87 drivers/s390/block/dcssblk.c unsigned long start; start 220 drivers/s390/block/dcssblk.c lowest_addr = entry->start; start 223 drivers/s390/block/dcssblk.c if (lowest_addr > entry->start) start 224 drivers/s390/block/dcssblk.c lowest_addr = entry->start; start 256 drivers/s390/block/dcssblk.c if (sort_list[j].start > sort_list[i].start) { start 267 drivers/s390/block/dcssblk.c if ((sort_list[i].end + 1) != sort_list[i+1].start) { start 320 drivers/s390/block/dcssblk.c &(*seg_info)->start, &(*seg_info)->end); start 625 drivers/s390/block/dcssblk.c dev_info->start = dcssblk_find_lowest_addr(dev_info); start 646 drivers/s390/block/dcssblk.c seg_byte_size = (dev_info->end - dev_info->start + 1); start 898 drivers/s390/block/dcssblk.c source_addr = dev_info->start + (index<<12) + bytes_done; start 925 drivers/s390/block/dcssblk.c dev_sz = dev_info->end - dev_info->start + 1; start 927 drivers/s390/block/dcssblk.c *kaddr = (void *) dev_info->start + offset; start 929 drivers/s390/block/dcssblk.c *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), start 1020 drivers/s390/block/dcssblk.c unsigned long start, end; start 1027 drivers/s390/block/dcssblk.c &start, &end); start 1033 drivers/s390/block/dcssblk.c if (start != entry->start || end != entry->end) { start 246 drivers/s390/block/xpram.c geo->start = 4; start 68 drivers/s390/char/con3215.c int start, len; /* start index & len in output buffer */ start 192 drivers/s390/char/con3215.c req->start = (raw->head - raw->count + raw->written) & start 200 drivers/s390/char/con3215.c ix = req->start; start 206 
drivers/s390/char/con3215.c len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1; start 215 drivers/s390/char/con3215.c ix = req->start; start 1169 drivers/s390/char/con3215.c .start = tty3215_start, start 114 drivers/s390/char/hmcdrv_ftp.c char *start; start 128 drivers/s390/char/hmcdrv_ftp.c start = cmd; start 134 drivers/s390/char/hmcdrv_ftp.c ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start); start 139 drivers/s390/char/hmcdrv_ftp.c ftp->fname = start; start 383 drivers/s390/char/sclp.h sclp_find_gds_vector(void *start, void *end, u16 id) start 387 drivers/s390/char/sclp.h for (v = start; (void *) v < end; v = (void *) v + v->length) start 394 drivers/s390/char/sclp.h sclp_find_gds_subvector(void *start, void *end, u8 key) start 398 drivers/s390/char/sclp.h for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length) start 222 drivers/s390/char/sclp_cmd.c unsigned long long start; start 228 drivers/s390/char/sclp_cmd.c start = rn2addr(rn); start 229 drivers/s390/char/sclp_cmd.c storage_key_init_range(start, start + sclp.rzm); start 278 drivers/s390/char/sclp_cmd.c static int sclp_mem_change_state(unsigned long start, unsigned long size, start 287 drivers/s390/char/sclp_cmd.c if (start + size - 1 < istart) start 289 drivers/s390/char/sclp_cmd.c if (start > istart + sclp.rzm - 1) start 301 drivers/s390/char/sclp_cmd.c static bool contains_standby_increment(unsigned long start, unsigned long end) start 310 drivers/s390/char/sclp_cmd.c if (start > istart + sclp.rzm - 1) start 321 drivers/s390/char/sclp_cmd.c unsigned long start, size; start 327 drivers/s390/char/sclp_cmd.c start = arg->start_pfn << PAGE_SHIFT; start 339 drivers/s390/char/sclp_cmd.c if (contains_standby_increment(start, start + size)) start 346 drivers/s390/char/sclp_cmd.c rc = sclp_mem_change_state(start, size, 1); start 349 drivers/s390/char/sclp_cmd.c sclp_mem_change_state(start, size, 0); start 352 drivers/s390/char/sclp_cmd.c sclp_mem_change_state(start, size, 0); start 368 drivers/s390/char/sclp_cmd.c static void __init align_to_block_size(unsigned long long *start, start 374 drivers/s390/char/sclp_cmd.c start_align = roundup(*start, alignment); start 375 drivers/s390/char/sclp_cmd.c size_align = rounddown(*start + *size, alignment) - start_align; start 378 drivers/s390/char/sclp_cmd.c *start, size_align >> 20, *size >> 20); start 379 drivers/s390/char/sclp_cmd.c *start = start_align; start 385 drivers/s390/char/sclp_cmd.c unsigned long long start, size, addr, block_size; start 394 drivers/s390/char/sclp_cmd.c start = rn2addr(first_rn); start 396 drivers/s390/char/sclp_cmd.c if (start >= VMEM_MAX_PHYS) start 398 drivers/s390/char/sclp_cmd.c if (start + size > VMEM_MAX_PHYS) start 399 drivers/s390/char/sclp_cmd.c size = VMEM_MAX_PHYS - start; start 400 drivers/s390/char/sclp_cmd.c if (memory_end_set && (start >= memory_end)) start 402 drivers/s390/char/sclp_cmd.c if (memory_end_set && (start + size > memory_end)) start 403 drivers/s390/char/sclp_cmd.c size = memory_end - start; start 405 drivers/s390/char/sclp_cmd.c align_to_block_size(&start, &size, block_size); start 408 drivers/s390/char/sclp_cmd.c for (addr = start; addr < start + size; addr += block_size) start 304 drivers/s390/char/sclp_early_core.c void __weak __init add_mem_detect_block(u64 start, u64 end) {} start 102 drivers/s390/char/tape_proc.c .start = tape_proc_start, start 293 drivers/s390/cio/airq.c unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, start 299 drivers/s390/cio/airq.c bit = find_next_bit_inv(iv->vector, 
end, start); start 389 drivers/s390/cio/blacklist.c .start = cio_ignore_proc_seq_start, start 277 drivers/s390/cio/ccwgroup.c char *start, *end; start 279 drivers/s390/cio/ccwgroup.c start = (char *)*buf; start 280 drivers/s390/cio/ccwgroup.c end = strchr(start, ','); start 283 drivers/s390/cio/ccwgroup.c end = strchr(start, '\n'); start 286 drivers/s390/cio/ccwgroup.c len = strlen(start) + 1; start 288 drivers/s390/cio/ccwgroup.c len = end - start + 1; start 292 drivers/s390/cio/ccwgroup.c if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3) start 137 drivers/s390/cio/itcw.c static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, start 142 drivers/s390/cio/itcw.c addr = ALIGN(*start, align); start 149 drivers/s390/cio/itcw.c *start = addr + len; start 183 drivers/s390/cio/itcw.c addr_t start; start 188 drivers/s390/cio/itcw.c start = (addr_t) buffer; start 189 drivers/s390/cio/itcw.c end = start + size; start 194 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); start 210 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); start 218 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); start 227 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tidaw) * start 235 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tidaw) * start 242 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); start 249 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); start 256 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); start 263 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); start 89 drivers/s390/cio/qdio.h int *start, int *count) start 93 drivers/s390/cio/qdio.h unsigned long _queuestart = ((unsigned long)queue << 32) | *start; start 101 drivers/s390/cio/qdio.h *start = _queuestart & 0xff; start 107 drivers/s390/cio/qdio.h int *start, int *count, int ack) start 111 drivers/s390/cio/qdio.h unsigned long _queuestart = ((unsigned long)queue << 32) | *start; start 120 drivers/s390/cio/qdio.h *start = _queuestart & 0xff; start 113 drivers/s390/cio/qdio_main.c int start, int count, int auto_ack) start 115 drivers/s390/cio/qdio_main.c int tmp_count = count, tmp_start = start, nr = q->nr; start 162 drivers/s390/cio/qdio_main.c static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, start 166 drivers/s390/cio/qdio_main.c int tmp_count = count, tmp_start = start; start 423 drivers/s390/cio/qdio_main.c static void process_buffer_error(struct qdio_q *q, unsigned int start, start 433 drivers/s390/cio/qdio_main.c q->sbal[start]->element[15].sflags == 0x10) { start 435 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); start 441 drivers/s390/cio/qdio_main.c DBF_ERROR("FTC:%3d C:%3d", start, count); start 443 drivers/s390/cio/qdio_main.c q->sbal[start]->element[14].sflags, start 444 drivers/s390/cio/qdio_main.c q->sbal[start]->element[15].sflags); start 451 drivers/s390/cio/qdio_main.c set_buf_states(q, start, state, count); start 454 drivers/s390/cio/qdio_main.c static inline void inbound_primed(struct qdio_q *q, unsigned int start, start 466 drivers/s390/cio/qdio_main.c q->u.in.ack_start = start; start 474 drivers/s390/cio/qdio_main.c q->u.in.ack_start = start; start 482 drivers/s390/cio/qdio_main.c new = add_buf(start, count - 1); start 497 drivers/s390/cio/qdio_main.c set_buf_states(q, start, 
SLSB_P_INPUT_NOT_INIT, count); start 500 drivers/s390/cio/qdio_main.c static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) start 519 drivers/s390/cio/qdio_main.c count = get_buf_states(q, start, &state, count, 1, 0); start 525 drivers/s390/cio/qdio_main.c inbound_primed(q, start, count); start 532 drivers/s390/cio/qdio_main.c process_buffer_error(q, start, count); start 544 drivers/s390/cio/qdio_main.c q->nr, start); start 552 drivers/s390/cio/qdio_main.c static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start) start 556 drivers/s390/cio/qdio_main.c count = get_inbound_buffer_frontier(q, start); start 564 drivers/s390/cio/qdio_main.c static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) start 573 drivers/s390/cio/qdio_main.c get_buf_state(q, start, &state, 0); start 591 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start); start 597 drivers/s390/cio/qdio_main.c static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) start 600 drivers/s390/cio/qdio_main.c int j, b = start; start 638 drivers/s390/cio/qdio_main.c int start = q->first_to_kick; start 645 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); start 649 drivers/s390/cio/qdio_main.c start, count); start 652 drivers/s390/cio/qdio_main.c q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, start 656 drivers/s390/cio/qdio_main.c q->first_to_kick = add_buf(start, count); start 671 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; start 676 drivers/s390/cio/qdio_main.c count = qdio_inbound_q_moved(q, start); start 680 drivers/s390/cio/qdio_main.c start = add_buf(start, count); start 681 drivers/s390/cio/qdio_main.c q->first_to_check = start; start 684 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { start 696 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { start 708 drivers/s390/cio/qdio_main.c static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) start 726 drivers/s390/cio/qdio_main.c count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq); start 742 drivers/s390/cio/qdio_main.c process_buffer_error(q, start, count); start 769 drivers/s390/cio/qdio_main.c static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) start 773 drivers/s390/cio/qdio_main.c count = get_outbound_buffer_frontier(q, start); start 778 drivers/s390/cio/qdio_main.c qdio_handle_aobs(q, start, count); start 828 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; start 834 drivers/s390/cio/qdio_main.c count = qdio_outbound_q_moved(q, start); start 836 drivers/s390/cio/qdio_main.c q->first_to_check = add_buf(start, count); start 893 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; start 903 drivers/s390/cio/qdio_main.c count = qdio_inbound_q_moved(q, start); start 907 drivers/s390/cio/qdio_main.c start = add_buf(start, count); start 908 drivers/s390/cio/qdio_main.c q->first_to_check = start; start 911 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { start 922 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { start 1451 drivers/s390/cio/qdio_main.c static inline int buf_in_between(int bufnr, int start, int count) start 1453 drivers/s390/cio/qdio_main.c int end = add_buf(start, count); start 1455 drivers/s390/cio/qdio_main.c if (end > start) { start 1456 drivers/s390/cio/qdio_main.c if (bufnr >= start && bufnr < end) 
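The long run of drivers/s390/cio/qdio_main.c entries above revolves around one piece of arithmetic: first_to_check is a ring index, add_buf(start, count) advances it modulo the queue size, and buf_in_between() has to handle the window [start, start + count) wrapping past the end of the ring. The helpers below are a generic reconstruction of that arithmetic under stated assumptions, not the kernel's own add_buf()/buf_in_between(); QUEUE_SIZE stands in for QDIO_MAX_BUFFERS_PER_Q and the names are illustrative.

/* generic ring-index arithmetic; assumes 1 <= count <= QUEUE_SIZE */
#define QUEUE_SIZE 128	/* stand-in for QDIO_MAX_BUFFERS_PER_Q */

static inline int example_add_buf(int start, int count)
{
	return (start + count) % QUEUE_SIZE;
}

static inline int example_buf_in_window(int bufnr, int start, int count)
{
	int end = example_add_buf(start, count);

	if (end > start)	/* window does not wrap */
		return bufnr >= start && bufnr < end;

	/* window wraps past the end of the ring */
	return bufnr >= start || bufnr < end;
}

The wrapped branch corresponds to the case the qdio code at line 1463 is handling: once end has wrapped below start, a buffer number belongs to the window if it lies at or after start, or strictly before end.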
start 1463 drivers/s390/cio/qdio_main.c if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || start 1667 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; start 1670 drivers/s390/cio/qdio_main.c count = q->is_input_q ? qdio_inbound_q_moved(q, start) : start 1671 drivers/s390/cio/qdio_main.c qdio_outbound_q_moved(q, start); start 1675 drivers/s390/cio/qdio_main.c *bufnr = start; start 1679 drivers/s390/cio/qdio_main.c q->first_to_check = add_buf(start, count); start 359 drivers/s390/net/ism_drv.c static unsigned int max_bytes(unsigned int start, unsigned int len, start 362 drivers/s390/net/ism_drv.c return min(boundary - (start & (boundary - 1)), len); start 902 drivers/s390/net/qeth_core.h static inline int qeth_get_elements_for_range(addr_t start, addr_t end) start 904 drivers/s390/net/qeth_core.h return PFN_UP(end) - PFN_DOWN(start); start 3611 drivers/s390/net/qeth_core_main.c addr_t start = (addr_t)skb->data + data_offset; start 3613 drivers/s390/net/qeth_core_main.c if (start != end) start 3614 drivers/s390/net/qeth_core_main.c elements += qeth_get_elements_for_range(start, end); start 3645 drivers/s390/net/qeth_core_main.c addr_t start, end; start 3650 drivers/s390/net/qeth_core_main.c start = (addr_t)skb->data - hdr_len; start 3653 drivers/s390/net/qeth_core_main.c if (qeth_get_elements_for_range(start, end + contiguous) == 1) { start 5262 drivers/s390/net/qeth_core_main.c unsigned int start, error, i; start 5281 drivers/s390/net/qeth_core_main.c &start, &error); start 5289 drivers/s390/net/qeth_core_main.c for (i = start; i < start + completed; i++) { start 556 drivers/s390/net/qeth_l2_main.c addr_t start = (addr_t)skb->data; start 564 drivers/s390/net/qeth_l2_main.c if (qeth_get_elements_for_range(start, end) > 1) { start 488 drivers/s390/net/qeth_l3_sys.c const char *start, *end; start 492 drivers/s390/net/qeth_l3_sys.c start = buf; start 494 drivers/s390/net/qeth_l3_sys.c end = strchr(start, '/'); start 495 drivers/s390/net/qeth_l3_sys.c if (!end || (end - start >= 40)) { start 498 drivers/s390/net/qeth_l3_sys.c strncpy(buffer, start, end - start); start 502 drivers/s390/net/qeth_l3_sys.c start = end + 1; start 503 drivers/s390/net/qeth_l3_sys.c *mask_bits = simple_strtoul(start, &tmp, 10); start 504 drivers/s390/net/qeth_l3_sys.c if (!strlen(start) || start 505 drivers/s390/net/qeth_l3_sys.c (tmp == start) || start 218 drivers/sbus/char/display7seg.c op->resource[0].start, start 174 drivers/sbus/char/flash.c flash.read_base = op->resource[0].start; start 177 drivers/sbus/char/flash.c flash.write_base = op->resource[1].start; start 180 drivers/sbus/char/flash.c flash.write_base = op->resource[0].start; start 175 drivers/scsi/a2091.c if (!request_mem_region(z->resource.start, 256, "wd33c93")) start 188 drivers/scsi/a2091.c regs = ZTWO_VADDR(z->resource.start); start 222 drivers/scsi/a2091.c release_mem_region(z->resource.start, 256); start 235 drivers/scsi/a2091.c release_mem_region(z->resource.start, 256); start 194 drivers/scsi/a3000.c if (!request_mem_region(res->start, resource_size(res), "wd33c93")) start 206 drivers/scsi/a3000.c regs = ZTWO_VADDR(res->start); start 240 drivers/scsi/a3000.c release_mem_region(res->start, resource_size(res)); start 254 drivers/scsi/a3000.c release_mem_region(res->start, resource_size(res)); start 46 drivers/scsi/a4000t.c if (!request_mem_region(res->start, resource_size(res), start 57 drivers/scsi/a4000t.c scsi_addr = res->start + A4000T_SCSI_OFFSET; start 94 drivers/scsi/a4000t.c release_mem_region(res->start, 
resource_size(res)); start 108 drivers/scsi/a4000t.c release_mem_region(res->start, resource_size(res)); start 157 drivers/scsi/aacraid/rx.c unsigned long start; start 189 drivers/scsi/aacraid/rx.c start = jiffies; start 194 drivers/scsi/aacraid/rx.c while (time_before(jiffies, start+30*HZ)) start 535 drivers/scsi/aacraid/rx.c unsigned long start; start 585 drivers/scsi/aacraid/rx.c start = jiffies; start 593 drivers/scsi/aacraid/rx.c time_after(jiffies, start+HZ*startup_timeout)) { start 600 drivers/scsi/aacraid/rx.c time_after(jiffies, start + HZ * start 606 drivers/scsi/aacraid/rx.c start = jiffies; start 148 drivers/scsi/aacraid/sa.c unsigned long start; start 172 drivers/scsi/aacraid/sa.c start = jiffies; start 174 drivers/scsi/aacraid/sa.c while(time_before(jiffies, start+30*HZ)) start 310 drivers/scsi/aacraid/sa.c unsigned long start; start 353 drivers/scsi/aacraid/sa.c start = jiffies; start 358 drivers/scsi/aacraid/sa.c if (time_after(jiffies, start+startup_timeout*HZ)) { start 204 drivers/scsi/aacraid/src.c unsigned long start; start 247 drivers/scsi/aacraid/src.c start = jiffies; start 256 drivers/scsi/aacraid/src.c while (time_before(jiffies, start+delay)) { start 708 drivers/scsi/aacraid/src.c unsigned long status, start; start 711 drivers/scsi/aacraid/src.c start = jiffies; start 720 drivers/scsi/aacraid/src.c start = jiffies; start 724 drivers/scsi/aacraid/src.c if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) { start 864 drivers/scsi/aacraid/src.c unsigned long start; start 916 drivers/scsi/aacraid/src.c start = jiffies; start 924 drivers/scsi/aacraid/src.c time_after(jiffies, start+HZ*startup_timeout)) { start 931 drivers/scsi/aacraid/src.c time_after(jiffies, start + HZ * start 937 drivers/scsi/aacraid/src.c start = jiffies; start 1008 drivers/scsi/aacraid/src.c unsigned long start = jiffies; start 1013 drivers/scsi/aacraid/src.c while (time_before(jiffies, start+delay)) { start 1156 drivers/scsi/aacraid/src.c unsigned long start; start 1189 drivers/scsi/aacraid/src.c start = jiffies; start 1192 drivers/scsi/aacraid/src.c if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) { start 1231 drivers/scsi/aacraid/src.c start = jiffies; start 1242 drivers/scsi/aacraid/src.c time_after(jiffies, start+HZ*startup_timeout)) { start 1249 drivers/scsi/aacraid/src.c time_after(jiffies, start + HZ * start 1255 drivers/scsi/aacraid/src.c start = jiffies; start 2148 drivers/scsi/advansys.c #define ASC_DBG_PRT_HEX(lvl, name, start, length) start 2196 drivers/scsi/advansys.c #define ASC_DBG_PRT_HEX(lvl, name, start, length) \ start 2199 drivers/scsi/advansys.c asc_prt_hex((name), (start), (length)); \ start 2304 drivers/scsi/aha152x.c unsigned long start = jiffies; start 2379 drivers/scsi/aha152x.c HOSTDATA(shpnt)->time[STATE] += jiffies-start; start 277 drivers/scsi/aic7xxx/aic79xx_osm_pci.c resource_size_t start; start 288 drivers/scsi/aic7xxx/aic79xx_osm_pci.c start = pci_resource_start(ahd->dev_softc, 1); start 289 drivers/scsi/aic7xxx/aic79xx_osm_pci.c base_page = start & PAGE_MASK; start 290 drivers/scsi/aic7xxx/aic79xx_osm_pci.c base_offset = start - base_page; start 291 drivers/scsi/aic7xxx/aic79xx_osm_pci.c if (start != 0) { start 292 drivers/scsi/aic7xxx/aic79xx_osm_pci.c *bus_addr = start; start 293 drivers/scsi/aic7xxx/aic79xx_osm_pci.c if (!request_mem_region(start, 0x1000, "aic79xx")) start 299 drivers/scsi/aic7xxx/aic79xx_osm_pci.c release_mem_region(start, 0x1000); start 365 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c resource_size_t start; start 369 
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c start = pci_resource_start(ahc->dev_softc, 1); start 370 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c if (start != 0) { start 371 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c *bus_addr = start; start 372 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c if (!request_mem_region(start, 0x1000, "aic7xxx")) start 375 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c *maddr = ioremap_nocache(start, 256); start 378 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c release_mem_region(start, 0x1000); start 27 drivers/scsi/aic94xx/aic94xx_hwi.h unsigned long start; /* pci resource start */ start 68 drivers/scsi/aic94xx/aic94xx_init.c io_handle->start = pci_resource_start(asd_ha->pcidev, i); start 72 drivers/scsi/aic94xx/aic94xx_init.c if (!io_handle->start || !io_handle->len) { start 83 drivers/scsi/aic94xx/aic94xx_init.c io_handle->addr = ioremap(io_handle->start, io_handle->len); start 123 drivers/scsi/aic94xx/aic94xx_init.c io_handle->start = pci_resource_start(asd_ha->pcidev, i); start 126 drivers/scsi/aic94xx/aic94xx_init.c io_handle->addr = (void __iomem *) io_handle->start; start 127 drivers/scsi/aic94xx/aic94xx_init.c if (!io_handle->start || !io_handle->len) { start 714 drivers/scsi/aic94xx/aic94xx_sds.c static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1) start 716 drivers/scsi/aic94xx/aic94xx_sds.c struct asd_ll_el *el = start; start 726 drivers/scsi/aic94xx/aic94xx_sds.c el = start + le16_to_cpu(el->next); start 727 drivers/scsi/aic94xx/aic94xx_sds.c } while (el != start); start 1312 drivers/scsi/arm/fas216.c static void fas216_send_messageout(FAS216_Info *info, int start) start 1327 drivers/scsi/arm/fas216.c for (i = start; i < msg->length; i++) start 1331 drivers/scsi/arm/fas216.c start = 0; start 798 drivers/scsi/atari_scsi.c instance->irq = irq->start; start 187 drivers/scsi/bfa/bfa.h void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start, start 321 drivers/scsi/bfa/bfa.h void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, start 335 drivers/scsi/bfa/bfa.h void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, start 178 drivers/scsi/bfa/bfa_hw_cb.c bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) start 180 drivers/scsi/bfa/bfa_hw_cb.c *start = BFI_MSIX_RME_QMIN_CB; start 165 drivers/scsi/bfa/bfa_hw_ct.c bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) start 167 drivers/scsi/bfa/bfa_hw_ct.c *start = BFI_MSIX_RME_QMIN_CT; start 69 drivers/scsi/bnx2fc/bnx2fc_els.c unsigned long start = jiffies; start 102 drivers/scsi/bnx2fc/bnx2fc_els.c if (time_after(jiffies, start + (10 * HZ))) { start 680 drivers/scsi/bnx2fc/bnx2fc_io.c unsigned long start = jiffies; start 714 drivers/scsi/bnx2fc/bnx2fc_io.c if (time_after(jiffies, start + HZ)) { start 590 drivers/scsi/csiostor/csio_hw.c csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) start 594 drivers/scsi/csiostor/csio_hw.c while (start <= end) { start 601 drivers/scsi/csiostor/csio_hw.c SF_ERASE_SECTOR | (start << 8)); start 609 drivers/scsi/csiostor/csio_hw.c start++; start 614 drivers/scsi/csiostor/csio_hw.c start, ret); start 258 drivers/scsi/csiostor/csio_hw_t5.c u32 pos, start, offset, memoffset; start 300 drivers/scsi/csiostor/csio_hw_t5.c start = addr & ~(mem_aperture-1); start 301 drivers/scsi/csiostor/csio_hw_t5.c offset = addr - start; start 309 drivers/scsi/csiostor/csio_hw_t5.c start, offset, win_pf); start 313 drivers/scsi/csiostor/csio_hw_t5.c for (pos = start; len > 0; pos += mem_aperture, offset = 0) { start 2094 
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size); start 2098 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c lldi->vr->iscsi.start, 2, start 2099 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c lldi->vr->ppod_edram.start, start 441 drivers/scsi/cxgbi/libcxgbi.c unsigned int start; start 470 drivers/scsi/cxgbi/libcxgbi.c start = idx = pmap->next; start 488 drivers/scsi/cxgbi/libcxgbi.c } while (idx != start); start 1292 drivers/scsi/cxgbi/libcxgbi.c unsigned int start, unsigned int rsvd_factor, start 1296 drivers/scsi/cxgbi/libcxgbi.c cdev->lldev, tformat, iscsi_size, llimit, start, start 623 drivers/scsi/cxgbi/libcxgbi.h unsigned int start, unsigned int rsvd_factor, start 773 drivers/scsi/dc395x.c struct DeviceCtlBlk *start = NULL; start 795 drivers/scsi/dc395x.c start = dcb; start 798 drivers/scsi/dc395x.c if (!start) { start 800 drivers/scsi/dc395x.c start = list_entry(dcb_list_head->next, typeof(*start), list); start 801 drivers/scsi/dc395x.c acb->dcb_run_robin = start; start 809 drivers/scsi/dc395x.c pos = start; start 832 drivers/scsi/dc395x.c } while (pos != start); start 450 drivers/scsi/fnic/fnic_main.c int (*start)(struct vnic_dev *, int), start 461 drivers/scsi/fnic/fnic_main.c err = start(vdev, arg); start 523 drivers/scsi/g_NCR5380.c int start = 0; start 529 drivers/scsi/g_NCR5380.c if (start == len - 128) { start 546 drivers/scsi/g_NCR5380.c dst + start, 64); start 549 drivers/scsi/g_NCR5380.c dst + start, 128); start 551 drivers/scsi/g_NCR5380.c memcpy_fromio(dst + start, start 553 drivers/scsi/g_NCR5380.c start += 128; start 554 drivers/scsi/g_NCR5380.c } while (start < len); start 556 drivers/scsi/g_NCR5380.c residual = len - start; start 590 drivers/scsi/g_NCR5380.c int start = 0; start 603 drivers/scsi/g_NCR5380.c if (start >= 128) start 604 drivers/scsi/g_NCR5380.c start -= 128; start 605 drivers/scsi/g_NCR5380.c if (start >= 128) start 606 drivers/scsi/g_NCR5380.c start -= 128; start 610 drivers/scsi/g_NCR5380.c if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0) start 615 drivers/scsi/g_NCR5380.c if (start >= 128) start 616 drivers/scsi/g_NCR5380.c start -= 128; start 620 drivers/scsi/g_NCR5380.c if (start >= len) start 625 drivers/scsi/g_NCR5380.c src + start, 64); start 628 drivers/scsi/g_NCR5380.c src + start, 128); start 631 drivers/scsi/g_NCR5380.c src + start, 128); start 632 drivers/scsi/g_NCR5380.c start += 128; start 635 drivers/scsi/g_NCR5380.c residual = len - start; start 292 drivers/scsi/gvp11.c address = z->resource.start; start 375 drivers/scsi/gvp11.c release_mem_region(z->resource.start, 256); start 775 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c int start, end; start 784 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c start = 1; start 796 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c start = 64 * (sata_idx + 1); start 802 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c start = find_next_zero_bit(bitmap, start 803 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c hisi_hba->slot_index_count, start); start 804 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (start >= end) { start 811 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (sata_dev ^ (start & 1)) start 813 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c start++; start 816 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c set_bit(start, bitmap); start 818 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c return start; start 164 drivers/scsi/ibmvscsi/ibmvfc.c entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len); start 510 drivers/scsi/ibmvscsi/ibmvfc.h struct ibmvfc_trace_start_entry start; start 166 drivers/scsi/isci/phy.c phy_cap.start = 1; 
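Many of the aacraid, bnx2fc and pm8001 entries above follow the same polling pattern: record jiffies in start, then loop while time_before(jiffies, start + n * HZ) so the wait gives up after roughly n seconds. A minimal sketch of that pattern is shown below, assuming a hypothetical foo_ready() in place of whatever hardware condition the real drivers poll.

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static bool foo_ready(void)
{
	return false;	/* hypothetical hardware readiness check */
}

static int foo_wait_ready(void)
{
	unsigned long start = jiffies;

	/* give up after 30 seconds: 30 * HZ jiffies measured from 'start' */
	while (time_before(jiffies, start + 30 * HZ)) {
		if (foo_ready())
			return 0;
		msleep(10);
	}
	return -ETIMEDOUT;
}

time_before()/time_after() compare jiffies values with wrap-safe signed arithmetic, which is why the drivers listed above use them rather than comparing the counters directly.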
start 123 drivers/scsi/isci/phy.h u8 start:1; start 130 drivers/scsi/jazz_esp.c esp->regs = (void __iomem *)res->start; start 138 drivers/scsi/jazz_esp.c esp->dma_regs = (void __iomem *)res->start; start 89 drivers/scsi/lasi700.c unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET; start 1513 drivers/scsi/lpfc/lpfc_nvme.c uint64_t start = 0; start 1561 drivers/scsi/lpfc/lpfc_nvme.c start = ktime_get_ns(); start 1644 drivers/scsi/lpfc/lpfc_nvme.c if (start) { start 1645 drivers/scsi/lpfc/lpfc_nvme.c lpfc_ncmd->ts_cmd_start = start; start 213 drivers/scsi/mac_scsi.c static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) start 215 drivers/scsi/mac_scsi.c unsigned char *addr = start; start 233 drivers/scsi/mac_scsi.c return start - addr; /* Negated to indicate uncertain length */ start 237 drivers/scsi/mac_scsi.c return addr - start; start 240 drivers/scsi/mac_scsi.c static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) start 242 drivers/scsi/mac_scsi.c unsigned char *addr = start; start 260 drivers/scsi/mac_scsi.c return start - addr; /* Negated to indicate uncertain length */ start 264 drivers/scsi/mac_scsi.c return addr - start; start 457 drivers/scsi/mac_scsi.c if (!hwreg_present((unsigned char *)pio_mem->start + start 459 drivers/scsi/mac_scsi.c pr_info(PFX "no device detected at %pap\n", &pio_mem->start); start 478 drivers/scsi/mac_scsi.c instance->irq = irq->start; start 483 drivers/scsi/mac_scsi.c hostdata->base = pio_mem->start; start 484 drivers/scsi/mac_scsi.c hostdata->io = (u8 __iomem *)pio_mem->start; start 487 drivers/scsi/mac_scsi.c hostdata->pdma_io = (u8 __iomem *)pdma_mem->start; start 2501 drivers/scsi/megaraid.c proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) start 2578 drivers/scsi/megaraid.c for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { start 1954 drivers/scsi/myrb.c int rc, start; start 1957 drivers/scsi/myrb.c rc = kstrtoint(buf, 0, &start); start 1965 drivers/scsi/myrb.c if (start) { start 2013 drivers/scsi/myrb.c start ? "Initiated" : "Cancelled"); start 2016 drivers/scsi/myrb.c if (!start) { start 2061 drivers/scsi/myrb.c int rc, start; start 2064 drivers/scsi/myrb.c rc = kstrtoint(buf, 0, &start); start 2072 drivers/scsi/myrb.c if (start) { start 2120 drivers/scsi/myrb.c start ? 
"Initiated" : "Cancelled"); start 2123 drivers/scsi/myrb.c if (!start) { start 1516 drivers/scsi/ncr53c8xx.c struct launch start; start 1795 drivers/scsi/ncr53c8xx.c ncrcmd start [ 5]; start 2358 drivers/scsi/ncr53c8xx.c PADDR (start), start 2426 drivers/scsi/ncr53c8xx.c PADDR(start), start 2447 drivers/scsi/ncr53c8xx.c PADDR(start), start 2467 drivers/scsi/ncr53c8xx.c PADDR (start), start 2578 drivers/scsi/ncr53c8xx.c PADDR(start), start 3213 drivers/scsi/ncr53c8xx.c PADDR (start), start 3371 drivers/scsi/ncr53c8xx.c PADDR (start), start 3373 drivers/scsi/ncr53c8xx.c PADDR (start), start 3384 drivers/scsi/ncr53c8xx.c PADDR (start), start 3515 drivers/scsi/ncr53c8xx.c ncrcmd *start, *end; start 3519 drivers/scsi/ncr53c8xx.c start = src; start 3536 drivers/scsi/ncr53c8xx.c ncr_name(np), (int) (src-start-1)); start 3566 drivers/scsi/ncr53c8xx.c ncr_name(np), (int) (src-start-1)); start 4348 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); start 4453 drivers/scsi/ncr53c8xx.c np->scripth->tryloop [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, start)); start 4669 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_paddr = start 5090 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_paddr = start 5348 drivers/scsi/ncr53c8xx.c OUTL_DSP (NCB_SCRIPT_PHYS (np, start)); start 6558 drivers/scsi/ncr53c8xx.c cp2->start.schedule.l_paddr = start 6652 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_paddr = start 6659 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_paddr = start 6664 drivers/scsi/ncr53c8xx.c OUTL_DSP (NCB_SCRIPT_PHYS (np, start)); start 6706 drivers/scsi/ncr53c8xx.c OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, start)); start 7329 drivers/scsi/ncr53c8xx.c cp->start.setup_dsa[0] = cpu_to_scr(copy_4); start 7330 drivers/scsi/ncr53c8xx.c cp->start.setup_dsa[1] = cpu_to_scr(CCB_PHYS(cp, start.p_phys)); start 7331 drivers/scsi/ncr53c8xx.c cp->start.setup_dsa[2] = cpu_to_scr(ncr_reg_bus_addr(nc_dsa)); start 7332 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_cmd = cpu_to_scr(SCR_JUMP); start 7333 drivers/scsi/ncr53c8xx.c cp->start.p_phys = cpu_to_scr(CCB_PHYS(cp, phys)); start 7335 drivers/scsi/ncr53c8xx.c memcpy(&cp->restart, &cp->start, sizeof(cp->restart)); start 7337 drivers/scsi/ncr53c8xx.c cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); start 8423 drivers/scsi/ncr53c8xx.c np->script0->start[0] = start 127 drivers/scsi/pcmcia/aha152x_stub.c p_dev->resource[0]->start = p_dev->resource[1]->start; start 129 drivers/scsi/pcmcia/aha152x_stub.c if (p_dev->resource[0]->start >= 0xffff) start 132 drivers/scsi/pcmcia/aha152x_stub.c p_dev->resource[1]->start = p_dev->resource[1]->end = 0; start 163 drivers/scsi/pcmcia/aha152x_stub.c s.io_port = link->resource[0]->start; start 47 drivers/scsi/pcmcia/fdomain_cs.c if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE, start 51 drivers/scsi/pcmcia/fdomain_cs.c sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev); start 63 drivers/scsi/pcmcia/fdomain_cs.c release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE); start 72 drivers/scsi/pcmcia/fdomain_cs.c release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE); start 1565 drivers/scsi/pcmcia/nsp_cs.c ioremap_nocache(p_dev->resource[2]->start, start 1604 drivers/scsi/pcmcia/nsp_cs.c release_region(link->resource[0]->start, start 1608 drivers/scsi/pcmcia/nsp_cs.c release_region(link->resource[1]->start, start 1614 drivers/scsi/pcmcia/nsp_cs.c data->BaseAddress = link->resource[0]->start; start 182 drivers/scsi/pcmcia/qlogic_stub.c 
if (p_dev->resource[0]->start == 0) start 209 drivers/scsi/pcmcia/qlogic_stub.c outb(0xb4, link->resource[0]->start + 0xd); start 210 drivers/scsi/pcmcia/qlogic_stub.c outb(0x24, link->resource[0]->start + 0x9); start 211 drivers/scsi/pcmcia/qlogic_stub.c outb(0x04, link->resource[0]->start + 0xd); start 217 drivers/scsi/pcmcia/qlogic_stub.c link->resource[0]->start + 16, link->irq); start 220 drivers/scsi/pcmcia/qlogic_stub.c link->resource[0]->start, link->irq); start 266 drivers/scsi/pcmcia/qlogic_stub.c outb(0x80, link->resource[0]->start + 0xd); start 267 drivers/scsi/pcmcia/qlogic_stub.c outb(0x24, link->resource[0]->start + 0x9); start 268 drivers/scsi/pcmcia/qlogic_stub.c outb(0x04, link->resource[0]->start + 0xd); start 683 drivers/scsi/pcmcia/sym53c500_cs.c if (p_dev->resource[0]->start == 0) start 723 drivers/scsi/pcmcia/sym53c500_cs.c outb(0xb4, link->resource[0]->start + 0xd); start 724 drivers/scsi/pcmcia/sym53c500_cs.c outb(0x24, link->resource[0]->start + 0x9); start 725 drivers/scsi/pcmcia/sym53c500_cs.c outb(0x04, link->resource[0]->start + 0xd); start 738 drivers/scsi/pcmcia/sym53c500_cs.c port_base = link->resource[0]->start; start 811 drivers/scsi/pcmcia/sym53c500_cs.c outb(0x80, link->resource[0]->start + 0xd); start 812 drivers/scsi/pcmcia/sym53c500_cs.c outb(0x24, link->resource[0]->start + 0x9); start 813 drivers/scsi/pcmcia/sym53c500_cs.c outb(0x04, link->resource[0]->start + 0xd); start 819 drivers/scsi/pcmcia/sym53c500_cs.c SYM53C500_int_host_reset(link->resource[0]->start); start 377 drivers/scsi/pm8001/pm8001_ctl.c int start = 0; start 384 drivers/scsi/pm8001/pm8001_ctl.c str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); start 385 drivers/scsi/pm8001/pm8001_ctl.c start = start + 4; start 410 drivers/scsi/pm8001/pm8001_ctl.c int start = 0; start 417 drivers/scsi/pm8001/pm8001_ctl.c str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); start 418 drivers/scsi/pm8001/pm8001_ctl.c start = start + 4; start 498 drivers/scsi/pm8001/pm8001_ctl.c static u32 start, end, count; start 504 drivers/scsi/pm8001/pm8001_ctl.c start = 0; start 508 drivers/scsi/pm8001/pm8001_ctl.c start = end; start 512 drivers/scsi/pm8001/pm8001_ctl.c for (; start < end; start++) start 513 drivers/scsi/pm8001/pm8001_ctl.c str += sprintf(str, "%08x ", *(temp+start)); start 385 drivers/scsi/pm8001/pm8001_hwi.c unsigned long start; start 391 drivers/scsi/pm8001/pm8001_hwi.c start = jiffies + HZ; /* 1 sec */ start 394 drivers/scsi/pm8001/pm8001_hwi.c } while ((regVal != shiftValue) && time_before(jiffies, start)); start 53 drivers/scsi/pm8001/pm80xx_hwi.c unsigned long start; start 56 drivers/scsi/pm8001/pm80xx_hwi.c start = jiffies + HZ; /* 1 sec */ start 59 drivers/scsi/pm8001/pm80xx_hwi.c } while ((reg_val != shift_value) && time_before(jiffies, start)); start 95 drivers/scsi/pm8001/pm80xx_hwi.c unsigned long start; start 225 drivers/scsi/pm8001/pm80xx_hwi.c start = jiffies + (2 * HZ); /* 2 sec */ start 230 drivers/scsi/pm8001/pm80xx_hwi.c } while ((reg_val) && time_before(jiffies, start)); start 3326 drivers/scsi/qedf/qedf_main.c rc = qed_ops->start(qedf->cdev, &qedf->tasks); start 3426 drivers/scsi/qedf/qedf_main.c rc = qed_ops->ll2->start(qedf->cdev, &params); start 206 drivers/scsi/qedi/qedi.h u16 start; start 525 drivers/scsi/qedi/qedi_main.c id_tbl->start = start_id; start 546 drivers/scsi/qedi/qedi_main.c id -= id_tbl->start; start 577 drivers/scsi/qedi/qedi_main.c id += id_tbl->start; start 590 drivers/scsi/qedi/qedi_main.c id -= id_tbl->start; start 1947 drivers/scsi/qedi/qedi_main.c
qedi_ops->ll2->start(qedi->cdev, &params); start 2534 drivers/scsi/qedi/qedi_main.c qedi_ops->ll2->start(qedi->cdev, &params); start 2542 drivers/scsi/qedi/qedi_main.c rc = qedi_ops->start(qedi->cdev, &qedi->tasks, start 337 drivers/scsi/qla2xxx/qla_attr.c uint32_t start = 0; start 348 drivers/scsi/qla2xxx/qla_attr.c if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) start 350 drivers/scsi/qla2xxx/qla_attr.c if (start > ha->optrom_size) start 352 drivers/scsi/qla2xxx/qla_attr.c if (size > ha->optrom_size - start) start 353 drivers/scsi/qla2xxx/qla_attr.c size = ha->optrom_size - start; start 382 drivers/scsi/qla2xxx/qla_attr.c ha->optrom_region_start = start; start 442 drivers/scsi/qla2xxx/qla_attr.c if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) start 448 drivers/scsi/qla2xxx/qla_attr.c "Invalid start region 0x%x/0x%x.\n", start, size); start 453 drivers/scsi/qla2xxx/qla_attr.c ha->optrom_region_start = start; start 1383 drivers/scsi/qla2xxx/qla_bsg.c uint32_t start = 0; start 1390 drivers/scsi/qla2xxx/qla_bsg.c start = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; start 1391 drivers/scsi/qla2xxx/qla_bsg.c if (start > ha->optrom_size) { start 1393 drivers/scsi/qla2xxx/qla_bsg.c "start %d > optrom_size %d.\n", start, ha->optrom_size); start 1403 drivers/scsi/qla2xxx/qla_bsg.c ha->optrom_region_start = start; start 1406 drivers/scsi/qla2xxx/qla_bsg.c if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) start 1408 drivers/scsi/qla2xxx/qla_bsg.c else if (start == (ha->flt_region_boot * 4) || start 1409 drivers/scsi/qla2xxx/qla_bsg.c start == (ha->flt_region_fw * 4)) start 1417 drivers/scsi/qla2xxx/qla_bsg.c "Invalid start region 0x%x/0x%x.\n", start, start 1422 drivers/scsi/qla2xxx/qla_bsg.c ha->optrom_region_size = start + start 1424 drivers/scsi/qla2xxx/qla_bsg.c ha->optrom_size - start : start 1428 drivers/scsi/qla2xxx/qla_bsg.c ha->optrom_region_size = start + start 1430 drivers/scsi/qla2xxx/qla_bsg.c ha->optrom_size - start : start 1536 drivers/scsi/qla2xxx/qla_fw.h uint32_t start; start 4579 drivers/scsi/qla2xxx/qla_mbx.c qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) start 4594 drivers/scsi/qla2xxx/qla_mbx.c mcp->mb[2] = LSW(start); start 4595 drivers/scsi/qla2xxx/qla_mbx.c mcp->mb[3] = MSW(start); start 698 drivers/scsi/qla2xxx/qla_nx.c u64 start; start 710 drivers/scsi/qla2xxx/qla_nx.c start = qla82xx_pci_set_window(ha, off); start 711 drivers/scsi/qla2xxx/qla_nx.c if ((start == -1UL) || start 723 drivers/scsi/qla2xxx/qla_nx.c mem_page = start & PAGE_MASK; start 727 drivers/scsi/qla2xxx/qla_nx.c if (mem_page != ((start + size - 1) & PAGE_MASK)) start 736 drivers/scsi/qla2xxx/qla_nx.c addr += start & (PAGE_SIZE - 1); start 770 drivers/scsi/qla2xxx/qla_nx.c u64 start; start 782 drivers/scsi/qla2xxx/qla_nx.c start = qla82xx_pci_set_window(ha, off); start 783 drivers/scsi/qla2xxx/qla_nx.c if ((start == -1UL) || start 795 drivers/scsi/qla2xxx/qla_nx.c mem_page = start & PAGE_MASK; start 799 drivers/scsi/qla2xxx/qla_nx.c if (mem_page != ((start + size - 1) & PAGE_MASK)) start 807 drivers/scsi/qla2xxx/qla_nx.c addr += start & (PAGE_SIZE - 1); start 1472 drivers/scsi/qla2xxx/qla_nx.c int i, j = 0, k, start, end, loop, sz[2], off0[2]; start 1521 drivers/scsi/qla2xxx/qla_nx.c start = off0[i] >> 2; start 1523 drivers/scsi/qla2xxx/qla_nx.c for (k = start; k <= end; k++) { start 548 drivers/scsi/qla2xxx/qla_sup.c qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) start 565 drivers/scsi/qla2xxx/qla_sup.c *start = 0; start 567
drivers/scsi/qla2xxx/qla_sup.c *start = FA_FLASH_LAYOUT_ADDR_24; start 569 drivers/scsi/qla2xxx/qla_sup.c *start = FA_FLASH_LAYOUT_ADDR; start 571 drivers/scsi/qla2xxx/qla_sup.c *start = FA_FLASH_LAYOUT_ADDR_81; start 573 drivers/scsi/qla2xxx/qla_sup.c *start = FA_FLASH_LAYOUT_ADDR_82; start 576 drivers/scsi/qla2xxx/qla_sup.c *start = FA_FLASH_LAYOUT_ADDR_83; start 579 drivers/scsi/qla2xxx/qla_sup.c *start = FA_FLASH_LAYOUT_ADDR_28; start 627 drivers/scsi/qla2xxx/qla_sup.c *start = (le16_to_cpu(fltl->start_hi) << 16 | start 632 drivers/scsi/qla2xxx/qla_sup.c loc, *start); start 675 drivers/scsi/qla2xxx/qla_sup.c uint32_t start; start 712 drivers/scsi/qla2xxx/qla_sup.c start = le32_to_cpu(region->start) >> 2; start 715 drivers/scsi/qla2xxx/qla_sup.c le16_to_cpu(region->code), start, start 726 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fw = start; start 731 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fw = start; start 734 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_boot = start; start 739 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd_nvram = start; start 743 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd = start; start 749 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd = start; start 755 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd = start; start 761 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd = start; start 767 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram = start; start 773 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram = start; start 779 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram = start; start 785 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram = start; start 788 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fdt = start; start 792 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_npiv_conf = start; start 796 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_npiv_conf = start; start 799 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_gold_fw = start; start 803 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fcp_prio = start; start 807 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fcp_prio = start; start 810 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_boot = start; start 814 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_boot = start; start 817 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fw = start; start 821 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fw = start; start 824 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_gold_fw = start; start 827 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_bootload = start; start 831 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd = start; start 837 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram = start; start 843 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram = start; start 847 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_img_status_pri = start; start 851 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_img_status_sec = start; start 855 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_fw_sec = start; start 859 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_boot_sec = start; start 863 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_aux_img_status_pri = start; start 867 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_aux_img_status_sec = start; start 872 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram_sec = start; start 877 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram_sec = start; start 882 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram_sec = start; start 887 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_nvram_sec = start; start 892 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd_nvram_sec = 
start; start 894 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd_sec = start; start 901 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd_sec = start; start 907 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd_sec = start; start 913 drivers/scsi/qla2xxx/qla_sup.c ha->flt_region_vpd_sec = start; start 1253 drivers/scsi/qla2xxx/qla_sup.c uint32_t start, finish; start 1256 drivers/scsi/qla2xxx/qla_sup.c start = fdata >> 2; start 1257 drivers/scsi/qla2xxx/qla_sup.c finish = start + (ha->fdt_block_size >> 2) - 1; start 1259 drivers/scsi/qla2xxx/qla_sup.c start), flash_data_addr(ha, finish)); start 2651 drivers/scsi/qla2xxx/qla_sup.c qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, start 2668 drivers/scsi/qla2xxx/qla_sup.c if (flt_reg->start == start) { start 261 drivers/scsi/qla2xxx/qla_tmpl.c ulong start = le32_to_cpu(ent->t262.start_addr); start 275 drivers/scsi/qla2xxx/qla_tmpl.c start = vha->hw->fw_shared_ram_start; start 278 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.start_addr = cpu_to_le32(start); start 282 drivers/scsi/qla2xxx/qla_tmpl.c start = vha->hw->fw_ddr_ram_start; start 285 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.start_addr = cpu_to_le32(start); start 290 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.start_addr = cpu_to_le32(start); start 300 drivers/scsi/qla2xxx/qla_tmpl.c if (end < start || start == 0 || end == 0) { start 303 drivers/scsi/qla2xxx/qla_tmpl.c __func__, start, end); start 308 drivers/scsi/qla2xxx/qla_tmpl.c dwords = end - start + 1; start 311 drivers/scsi/qla2xxx/qla_tmpl.c qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); start 579 drivers/scsi/qla2xxx/qla_tmpl.c ulong start = le32_to_cpu(ent->t272.addr); start 585 drivers/scsi/qla2xxx/qla_tmpl.c "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); start 587 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf); start 358 drivers/scsi/qla4xxx/ql4_fw.h uint32_t start; start 718 drivers/scsi/qla4xxx/ql4_nx.c u64 start; start 729 drivers/scsi/qla4xxx/ql4_nx.c start = qla4_82xx_pci_set_window(ha, off); start 730 drivers/scsi/qla4xxx/ql4_nx.c if ((start == -1UL) || start 738 drivers/scsi/qla4xxx/ql4_nx.c addr = qla4_8xxx_pci_base_offsetfset(ha, start); start 742 drivers/scsi/qla4xxx/ql4_nx.c mem_page = start & PAGE_MASK; start 746 drivers/scsi/qla4xxx/ql4_nx.c if (mem_page != ((start + size - 1) & PAGE_MASK)) start 756 drivers/scsi/qla4xxx/ql4_nx.c addr += start & (PAGE_SIZE - 1); start 791 drivers/scsi/qla4xxx/ql4_nx.c u64 start; start 802 drivers/scsi/qla4xxx/ql4_nx.c start = qla4_82xx_pci_set_window(ha, off); start 803 drivers/scsi/qla4xxx/ql4_nx.c if ((start == -1UL) || start 811 drivers/scsi/qla4xxx/ql4_nx.c addr = qla4_8xxx_pci_base_offsetfset(ha, start); start 815 drivers/scsi/qla4xxx/ql4_nx.c mem_page = start & PAGE_MASK; start 819 drivers/scsi/qla4xxx/ql4_nx.c if (mem_page != ((start + size - 1) & PAGE_MASK)) start 827 drivers/scsi/qla4xxx/ql4_nx.c addr += start & (PAGE_SIZE - 1); start 1369 drivers/scsi/qla4xxx/ql4_nx.c int i, j = 0, k, start, end, loop, sz[2], off0[2]; start 1420 drivers/scsi/qla4xxx/ql4_nx.c start = off0[i] >> 2; start 1422 drivers/scsi/qla4xxx/ql4_nx.c for (k = start; k <= end; k++) { start 3703 drivers/scsi/qla4xxx/ql4_nx.c qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start) start 3713 drivers/scsi/qla4xxx/ql4_nx.c *start = FA_FLASH_LAYOUT_ADDR_82; start 3715 drivers/scsi/qla4xxx/ql4_nx.c DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); start 3725 drivers/scsi/qla4xxx/ql4_nx.c uint32_t start, 
status; start 3771 drivers/scsi/qla4xxx/ql4_nx.c start = le32_to_cpu(region->start) >> 2; start 3774 drivers/scsi/qla4xxx/ql4_nx.c "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, start 3779 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_region_fdt = start; start 3782 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_region_boot = start; start 3786 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_region_fw = start; start 3789 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_region_bootload = start; start 3792 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_iscsi_param = start; start 3795 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_region_chap = start; start 3799 drivers/scsi/qla4xxx/ql4_nx.c hw->flt_region_ddb = start; start 2606 drivers/scsi/scsi_debug.c void *start = dif_store(sector); start 2609 drivers/scsi/scsi_debug.c if (dif_store_end < start + len) start 2610 drivers/scsi/scsi_debug.c rest = start + len - dif_store_end; start 2615 drivers/scsi/scsi_debug.c memcpy(paddr, start, len - rest); start 2617 drivers/scsi/scsi_debug.c memcpy(start, paddr, len - rest); start 691 drivers/scsi/scsi_devinfo.c .start = devinfo_seq_start, start 375 drivers/scsi/scsi_proc.c static inline struct device *next_scsi_device(struct device *start) start 377 drivers/scsi/scsi_proc.c struct device *next = bus_find_next_device(&scsi_bus_type, start); start 379 drivers/scsi/scsi_proc.c put_device(start); start 417 drivers/scsi/scsi_proc.c .start = scsi_seq_start, start 1810 drivers/scsi/scsi_scan.c unsigned long start = jiffies; start 1814 drivers/scsi/scsi_scan.c while (!shost->hostt->scan_finished(shost, jiffies - start)) start 3488 drivers/scsi/sd.c static int sd_start_stop_device(struct scsi_disk *sdkp, int start) start 3495 drivers/scsi/sd.c if (start) start 3499 drivers/scsi/sd.c cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ start 45 drivers/scsi/sd_zbc.c zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16])); start 49 drivers/scsi/sd_zbc.c zone->wp = zone->start + zone->len; start 2331 drivers/scsi/sg.c .start = dev_seq_start, start 2339 drivers/scsi/sg.c .start = dev_seq_start, start 2347 drivers/scsi/sg.c .start = dev_seq_start, start 180 drivers/scsi/sgiwd93.c unsigned long start, end; start 182 drivers/scsi/sgiwd93.c start = (unsigned long) hcp; start 183 drivers/scsi/sgiwd93.c end = start + HPC_DMA_SIZE; start 184 drivers/scsi/sgiwd93.c while (start < end) { start 189 drivers/scsi/sgiwd93.c start += sizeof(struct hpc_chunk); start 67 drivers/scsi/sni_53c710.c base = res->start; start 443 drivers/scsi/snic/snic_debugfs.c .start = snic_trc_seq_start, start 188 drivers/scsi/snic/snic_main.c int (*start)(struct vnic_dev *, int), start 196 drivers/scsi/snic/snic_main.c ret = start(vdev, arg); start 1205 drivers/scsi/snic/snic_scsi.c u64 start = jiffies, cmpl_time; start 1268 drivers/scsi/snic/snic_scsi.c cmpl_time = jiffies - start; start 4902 drivers/scsi/st.c unsigned long start = uaddr >> PAGE_SHIFT; start 4903 drivers/scsi/st.c const int nr_pages = end - start; start 541 drivers/scsi/sun3_scsi.c ioaddr = sun3_ioremap(mem->start, resource_size(mem), start 568 drivers/scsi/sun3_scsi.c ioaddr = ioremap(mem->start, resource_size(mem)); start 586 drivers/scsi/sun3_scsi.c instance->irq = irq->start; start 589 drivers/scsi/sun3_scsi.c hostdata->base = mem->start; start 190 drivers/scsi/sun3x_esp.c if (!res || !res->start) start 193 drivers/scsi/sun3x_esp.c esp->regs = ioremap_nocache(res->start, 0x20); start 198 drivers/scsi/sun3x_esp.c if (!res || !res->start) start 201 drivers/scsi/sun3x_esp.c esp->dma_regs = 
ioremap_nocache(res->start, 0x10); start 109 drivers/scsi/sym53c8xx_2/sym_fw.c scripta0->start[0] = cpu_to_scr(SCR_NO_OP); start 153 drivers/scsi/sym53c8xx_2/sym_fw.c scripta0->start[0] = cpu_to_scr(SCR_NO_OP); start 348 drivers/scsi/sym53c8xx_2/sym_fw.c void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len) start 354 drivers/scsi/sym53c8xx_2/sym_fw.c cur = start; start 355 drivers/scsi/sym53c8xx_2/sym_fw.c end = start + len/4; start 369 drivers/scsi/sym53c8xx_2/sym_fw.c sym_name(np), (int) (cur-start)); start 384 drivers/scsi/sym53c8xx_2/sym_fw.c printf ("%d: <%x>\n", (int) (cur-start), start 412 drivers/scsi/sym53c8xx_2/sym_fw.c sym_name(np), (int) (cur-start)); start 33 drivers/scsi/sym53c8xx_2/sym_fw.h SYM_GEN_A(s, start) SYM_GEN_A(s, getjob_begin) \ start 43 drivers/scsi/sym53c8xx_2/sym_fw1.h u32 start [ 11]; start 456 drivers/scsi/sym53c8xx_2/sym_fw1.h PADDR_A (start), start 739 drivers/scsi/sym53c8xx_2/sym_fw1.h PADDR_A (start), start 831 drivers/scsi/sym53c8xx_2/sym_fw1.h PADDR_A (start), start 878 drivers/scsi/sym53c8xx_2/sym_fw1.h PADDR_A(start), start 1378 drivers/scsi/sym53c8xx_2/sym_fw1.h PADDR_A (start), start 1648 drivers/scsi/sym53c8xx_2/sym_fw1.h PADDR_A (start), start 43 drivers/scsi/sym53c8xx_2/sym_fw2.h u32 start [ 14]; start 441 drivers/scsi/sym53c8xx_2/sym_fw2.h PADDR_A (start), start 714 drivers/scsi/sym53c8xx_2/sym_fw2.h PADDR_A (start), start 794 drivers/scsi/sym53c8xx_2/sym_fw2.h PADDR_A (start), start 840 drivers/scsi/sym53c8xx_2/sym_fw2.h PADDR_A(start), start 1257 drivers/scsi/sym53c8xx_2/sym_fw2.h PADDR_A (start), start 1521 drivers/scsi/sym53c8xx_2/sym_fw2.h PADDR_A (start), start 1518 drivers/scsi/sym53c8xx_2/sym_glue.c device->mmio_base = bus_addr.start; start 1529 drivers/scsi/sym53c8xx_2/sym_glue.c device->ram_base = bus_addr.start; start 2209 drivers/scsi/sym53c8xx_2/sym_hipd.c OUTL_DSP(np, SCRIPTA_BA(np, start)); start 3081 drivers/scsi/sym53c8xx_2/sym_hipd.c OUTL_DSP(np, SCRIPTA_BA(np, start)); start 3157 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); start 4903 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle)); start 5228 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); start 5413 drivers/scsi/sym53c8xx_2/sym_hipd.c OUTL_DSP(np, SCRIPTA_BA(np, start)); start 5731 drivers/scsi/sym53c8xx_2/sym_hipd.c np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); start 5735 drivers/scsi/sym53c8xx_2/sym_hipd.c np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); start 5739 drivers/scsi/sym53c8xx_2/sym_hipd.c np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle)); start 5743 drivers/scsi/sym53c8xx_2/sym_hipd.c np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle)); start 545 drivers/scsi/sym53c8xx_2/sym_hipd.h u32 start; /* Jumped by SCRIPTS after selection */ start 1036 drivers/scsi/sym53c8xx_2/sym_hipd.h void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len); start 908 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 968 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), ret); start 1013 drivers/scsi/ufs/ufshcd.c ktime_t start; start 1021 drivers/scsi/ufs/ufshcd.c start = ktime_get(); start 1039 drivers/scsi/ufs/ufshcd.c if (ktime_to_us(ktime_sub(ktime_get(), start)) > start 1226 drivers/scsi/ufs/ufshcd.c ktime_t start; start 1258 drivers/scsi/ufs/ufshcd.c start = ktime_get(); start 1263 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), ret); start 1529 
drivers/scsi/ufs/ufshcd.c start: start 1550 drivers/scsi/ufs/ufshcd.c goto start; start 1589 drivers/scsi/ufs/ufshcd.c goto start; start 3884 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 3891 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), ret); start 3936 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 3943 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), ret); start 6874 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 6998 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), start 7283 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 7350 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), ret); start 7996 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 8024 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), start 8042 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 8057 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), start 8076 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 8087 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), start 8117 drivers/scsi/ufs/ufshcd.c ktime_t start = ktime_get(); start 8128 drivers/scsi/ufs/ufshcd.c ktime_to_us(ktime_sub(ktime_get(), start)), start 92 drivers/scsi/zalon.c void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096); start 131 drivers/scsi/zalon.c device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET; start 344 drivers/sfi/sfi_core.c void *start; start 347 drivers/sfi/sfi_core.c start = sfi_map_memory(SFI_SYST_SEARCH_BEGIN, len); start 348 drivers/sfi/sfi_core.c if (!start) start 354 drivers/sfi/sfi_core.c syst_hdr = start + offset; start 379 drivers/sfi/sfi_core.c sfi_unmap_memory(start, len); start 383 drivers/sfi/sfi_core.c sfi_unmap_memory(start, len); start 214 drivers/sh/intc/core.c d->window[k].phys = res->start; start 216 drivers/sh/intc/core.c d->window[k].virt = ioremap_nocache(res->start, start 34 drivers/sh/superhyway/superhyway-sysfs.c superhyway_ro_attr(resource, "0x%08lx\n", resource[0].start); start 76 drivers/sh/superhyway/superhyway.c dev->resource->start = base; start 77 drivers/sh/superhyway/superhyway.c dev->resource->end = dev->resource->start + 0x01000000; start 101 drivers/sh/superhyway/superhyway.c ret |= superhyway_add_device(dev->resource[0].start, dev, bus); start 481 drivers/slimbus/qcom-ctrl.c ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start, start 1430 drivers/slimbus/qcom-ngd-ctrl.c ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt, start 224 drivers/soc/aspeed/aspeed-lpc-ctrl.c lpc_ctrl->pnor_base = resm.start; start 243 drivers/soc/aspeed/aspeed-lpc-ctrl.c lpc_ctrl->mem_base = resm.start; start 359 drivers/soc/aspeed/aspeed-p2a-ctrl.c misc_ctrl->mem_base = resm.start; start 153 drivers/soc/bcm/bcm2835-power.c u64 start; start 158 drivers/soc/bcm/bcm2835-power.c start = ktime_get_ns(); start 164 drivers/soc/bcm/bcm2835-power.c if (ktime_get_ns() - start >= 1000) start 173 drivers/soc/bcm/bcm2835-power.c u64 start; start 178 drivers/soc/bcm/bcm2835-power.c start = ktime_get_ns(); start 184 drivers/soc/bcm/bcm2835-power.c if (ktime_get_ns() - start >= 1000) start 211 drivers/soc/bcm/bcm2835-power.c u64 start; start 231 drivers/soc/bcm/bcm2835-power.c start = ktime_get_ns(); start 234 drivers/soc/bcm/bcm2835-power.c if (ktime_get_ns() - start >= 3000) start 250 drivers/soc/bcm/bcm2835-power.c start = ktime_get_ns(); start 253 drivers/soc/bcm/bcm2835-power.c 
if (ktime_get_ns() - start >= 1000) { start 128 drivers/soc/bcm/brcmstb/pm/pm-arm.c sram = __arm_ioremap_exec(res.start, resource_size(&res), false); start 74 drivers/soc/fsl/dpaa2-console.c mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr)); start 207 drivers/soc/fsl/dpio/dpio-driver.c desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, start 211 drivers/soc/fsl/dpio/dpio-driver.c desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start, start 222 drivers/soc/fsl/dpio/dpio-driver.c desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start, start 225 drivers/soc/fsl/qbman/bman_ccsr.c bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); start 142 drivers/soc/fsl/qbman/bman_portal.c pcfg->addr_virt_ce = memremap(addr_phys[0]->start, start 150 drivers/soc/fsl/qbman/bman_portal.c pcfg->addr_virt_ci = ioremap(addr_phys[1]->start, start 769 drivers/soc/fsl/qbman/qman_ccsr.c qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); start 282 drivers/soc/fsl/qbman/qman_portal.c pcfg->addr_virt_ce = memremap(addr_phys[0]->start, start 290 drivers/soc/fsl/qbman/qman_portal.c pcfg->addr_virt_ci = ioremap(addr_phys[1]->start, start 84 drivers/soc/fsl/qe/qe.c qebase = res.start; start 35 drivers/soc/fsl/qe/qe_common.c unsigned long start; start 85 drivers/soc/fsl/qe/qe_common.c ret = gen_pool_add(muram_pool, r.start - muram_pbase + start 119 drivers/soc/fsl/qe/qe_common.c unsigned long start; start 124 drivers/soc/fsl/qe/qe_common.c start = gen_pool_alloc_algo(muram_pool, size, algo, data); start 125 drivers/soc/fsl/qe/qe_common.c if (!start) start 127 drivers/soc/fsl/qe/qe_common.c start = start - GENPOOL_OFFSET; start 128 drivers/soc/fsl/qe/qe_common.c memset_io(cpm_muram_addr(start), 0, size); start 132 drivers/soc/fsl/qe/qe_common.c entry->start = start; start 136 drivers/soc/fsl/qe/qe_common.c return start; start 138 drivers/soc/fsl/qe/qe_common.c gen_pool_free(muram_pool, start, size); start 154 drivers/soc/fsl/qe/qe_common.c unsigned long start; start 160 drivers/soc/fsl/qe/qe_common.c start = cpm_muram_alloc_common(size, gen_pool_first_fit_align, start 163 drivers/soc/fsl/qe/qe_common.c return start; start 180 drivers/soc/fsl/qe/qe_common.c if (tmp->start == offset) { start 203 drivers/soc/fsl/qe/qe_common.c unsigned long start; start 209 drivers/soc/fsl/qe/qe_common.c start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc, start 212 drivers/soc/fsl/qe/qe_common.c return start; start 339 drivers/soc/fsl/qe/qe_ic.c qe_ic->regs = ioremap(res.start, resource_size(&res)); start 39 drivers/soc/fsl/qe/qe_io.c par_io = ioremap(res.start, resource_size(&res)); start 694 drivers/soc/ixp4xx/ixp4xx-npe.c i, res->start, res->end); start 703 drivers/soc/ixp4xx/ixp4xx-npe.c i, res->start, res->end); start 708 drivers/soc/ixp4xx/ixp4xx-npe.c i, res->start, res->end); start 887 drivers/soc/qcom/smem.c smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size); start 890 drivers/soc/qcom/smem.c smem->regions[i].aux_base = (u32)r.start; start 195 drivers/soc/tegra/flowctrl.c res.start = 0x60007000; start 207 drivers/soc/tegra/flowctrl.c tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res)); start 126 drivers/soc/tegra/fuse/fuse-tegra.c fuse->phys = res->start; start 261 drivers/soc/tegra/fuse/fuse-tegra.c regs.start = 0x7000f800; start 326 drivers/soc/tegra/fuse/fuse-tegra.c fuse->base = ioremap_nocache(regs.start, resource_size(&regs)); start 121 drivers/soc/tegra/fuse/tegra-apbmisc.c apbmisc.start = 0x70000800; start 127
drivers/soc/tegra/fuse/tegra-apbmisc.c straps.start = 0x7000e864; start 130 drivers/soc/tegra/fuse/tegra-apbmisc.c straps.start = 0x70000008; start 162 drivers/soc/tegra/fuse/tegra-apbmisc.c apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc)); start 166 drivers/soc/tegra/fuse/tegra-apbmisc.c strapping_base = ioremap_nocache(straps.start, resource_size(&straps)); start 2669 drivers/soc/tegra/pmc.c wake = ioremap_nocache(regs.start, resource_size(&regs)); start 2871 drivers/soc/tegra/pmc.c regs.start = 0x7000e400; start 2895 drivers/soc/tegra/pmc.c pmc->base = ioremap_nocache(regs.start, resource_size(&regs)); start 1222 drivers/soc/ti/knav_qmss_queue.c u32 temp[2], start, end, id, index; start 1289 drivers/soc/ti/knav_qmss_queue.c start = max(qmgr->start_queue, range->queue_base); start 1292 drivers/soc/ti/knav_qmss_queue.c for (id = start; id < end; id++) { start 87 drivers/soc/ti/ti_sci_inta_msi.c msi_desc->inta.dev_index = res->desc[set].start + i; start 195 drivers/soc/ti/ti_sci_pm_domains.c ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start; start 514 drivers/soc/xilinx/xlnx_vcu.c xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start, start 527 drivers/soc/xilinx/xlnx_vcu.c xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start, start 583 drivers/spi/spi-at91-usart.c aus->phybase = regs->start; start 601 drivers/spi/spi-at91-usart.c &regs->start, irq); start 1544 drivers/spi/spi-atmel.c as->phybase = regs->start; start 1641 drivers/spi/spi-atmel.c atmel_get_version(as), (unsigned long)regs->start, start 758 drivers/spi/spi-au1550.c hw->irq = r->start; start 763 drivers/spi/spi-au1550.c hw->dma_tx_id = r->start; start 766 drivers/spi/spi-au1550.c hw->dma_rx_id = r->start; start 783 drivers/spi/spi-au1550.c hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t), start 791 drivers/spi/spi-au1550.c hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t)); start 914 drivers/spi/spi-au1550.c release_mem_region(r->start, sizeof(psc_spi_t)); start 934 drivers/spi/spi-au1550.c release_mem_region(hw->ioarea->start, sizeof(psc_spi_t)); start 915 drivers/spi/spi-davinci.c dspi->pbase = r->start; start 704 drivers/spi/spi-ep93xx.c espi->sspdr_phys = res->start + SSPDR; start 732 drivers/spi/spi-ep93xx.c (unsigned long)res->start, irq); start 1122 drivers/spi/spi-fsl-dspi.c ret = dspi_request_dma(dspi, res->start); start 222 drivers/spi/spi-fsl-espi.c start: start 267 drivers/spi/spi-fsl-espi.c goto start; start 277 drivers/spi/spi-fsl-espi.c start: start 319 drivers/spi/spi-fsl-espi.c goto start; start 912 drivers/spi/spi-fsl-lpspi.c fsl_lpspi->base_phys = res->start; start 868 drivers/spi/spi-fsl-qspi.c q->memmap_phy = res->start; start 602 drivers/spi/spi-img-spfi.c spfi->phys = res->start; start 1675 drivers/spi/spi-imx.c spi_imx->base_phys = res->start; start 170 drivers/spi/spi-jcore.c if (!devm_request_mem_region(&pdev->dev, res->start, start 173 drivers/spi/spi-jcore.c hw->base = devm_ioremap_nocache(&pdev->dev, res->start, start 371 drivers/spi/spi-loopback-test.c #define RANGE_CHECK(ptr, plen, start, slen) \ start 372 drivers/spi/spi-loopback-test.c ((ptr >= start) && (ptr + plen <= start + slen)) start 447 drivers/spi/spi-loopback-test.c u8 *start; start 456 drivers/spi/spi-loopback-test.c if (rx_a->start > rx_b->start) start 458 drivers/spi/spi-loopback-test.c if (rx_a->start < rx_b->start) start 482 drivers/spi/spi-loopback-test.c ranges[i].start = xfer->rx_buf; start 507 drivers/spi/spi-loopback-test.c if ((addr >= r->start) && (addr <
r->end)) start 898 drivers/spi/spi-loopback-test.c ktime_t start; start 904 drivers/spi/spi-loopback-test.c start = ktime_get(); start 907 drivers/spi/spi-loopback-test.c test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start)); start 606 drivers/spi/spi-npcm-fiu.c devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start + start 986 drivers/spi/spi-nxp-fspi.c f->memmap_phy = res->start; start 1451 drivers/spi/spi-omap2-mcspi.c mcspi->phys = r->start + regs_offset; start 756 drivers/spi/spi-orion.c dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); start 710 drivers/spi/spi-pic32.c pic32s->dma_base = mem->start; start 2220 drivers/spi/spi-pl022.c pl022->phybase = adev->res.start; start 2221 drivers/spi/spi-pl022.c pl022->virtbase = devm_ioremap(dev, adev->res.start, start 2228 drivers/spi/spi-pl022.c &adev->res.start, pl022->virtbase); start 488 drivers/spi/spi-ppc4xx.c hw->mapbase = resource.start; start 1585 drivers/spi/spi-pxa2xx.c ssp->phys_base = res->start; start 1081 drivers/spi/spi-qup.c ret = spi_qup_init_dma(master, res->start); start 713 drivers/spi/spi-rockchip.c rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR; start 714 drivers/spi/spi-rockchip.c rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR; start 1085 drivers/spi/spi-rspi.c res->start + RSPI_SPDR); start 1090 drivers/spi/spi-rspi.c res->start + RSPI_SPDR); start 1064 drivers/spi/spi-s3c64xx.c sdd->sfr_start = mem_res->start; start 247 drivers/spi/spi-sh-hspi.c res->start, resource_size(res)); start 1235 drivers/spi/spi-sh-msiof.c dma_tx_id, res->start + TFDR); start 1240 drivers/spi/spi-sh-msiof.c dma_rx_id, res->start + RFDR); start 154 drivers/spi/spi-sh-sci.c sp->membase = ioremap(r->start, resource_size(r)); start 466 drivers/spi/spi-sh.c ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 485 drivers/spi/spi-sprd-adi.c sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET; start 937 drivers/spi/spi-sprd.c ss->phy_base = res->start; start 556 drivers/spi/spi-stm32-qspi.c qspi->phys_base = res->start; start 1838 drivers/spi/spi-stm32.c spi->phys_addr = (dma_addr_t)res->start; start 210 drivers/spi/spi-sun4i.c unsigned int start, end, tx_time; start 329 drivers/spi/spi-sun4i.c start = jiffies; start 337 drivers/spi/spi-sun4i.c jiffies_to_msecs(end - start), tx_time); start 202 drivers/spi/spi-sun6i.c unsigned int start, end, tx_time; start 328 drivers/spi/spi-sun6i.c start = jiffies; start 336 drivers/spi/spi-sun6i.c jiffies_to_msecs(end - start), tx_time); start 1351 drivers/spi/spi-tegra114.c tspi->phys = r->start; start 1054 drivers/spi/spi-tegra20-slink.c tspi->phys = r->start; start 791 drivers/spi/spi-ti-qspi.c qspi->mmap_phys_base = (dma_addr_t)res_mmap->start; start 379 drivers/spi/spi-txx9.c (unsigned long long)res->start, irq, start 490 drivers/spi/spi-xilinx.c (unsigned long long)res->start, xspi->regs, xspi->irq); start 1965 drivers/spi/spi.c lookup->irq = r.start; start 44 drivers/ssb/bridge_pcmcia_80211.c dev->resource[2]->start = 0; start 61 drivers/ssb/bridge_pcmcia_80211.c err = ssb_bus_pcmciabus_register(ssb, dev, dev->resource[2]->start); start 14 drivers/ssb/driver_chipcommon_sflash.c .start = SSB_FLASH2, start 159 drivers/ssb/driver_chipcommon_sflash.c ssb_sflash_dev.resource[0].end = ssb_sflash_dev.resource[0].start + start 190 drivers/ssb/driver_gige.c dev->io_resource.start = 0x800; start 203 drivers/ssb/driver_gige.c dev->mem_resource.start = base; start 265 drivers/ssb/driver_gige.c res->start = dev->mem_resource.start; start 260 drivers/ssb/driver_mipscore.c 
ssb_pflash_resource.start = pflash->window; start 239 drivers/ssb/driver_pcicore.c .start = SSB_PCI_DMA, start 246 drivers/ssb/driver_pcicore.c .start = 0x100, start 109 drivers/staging/android/ashmem.c size_t start, size_t end) start 111 drivers/staging/android/ashmem.c return (range->pgstart >= start) && (range->pgend <= end); start 115 drivers/staging/android/ashmem.c size_t start, size_t end) start 117 drivers/staging/android/ashmem.c return (range->pgstart <= start) && (range->pgend >= end); start 126 drivers/staging/android/ashmem.c size_t start, size_t end) start 128 drivers/staging/android/ashmem.c return page_in_range(range, start) || page_in_range(range, end) || start 129 drivers/staging/android/ashmem.c page_range_subsumes_range(range, start, end); start 178 drivers/staging/android/ashmem.c size_t start, size_t end, start 185 drivers/staging/android/ashmem.c range->pgstart = start; start 220 drivers/staging/android/ashmem.c size_t start, size_t end) start 224 drivers/staging/android/ashmem.c range->pgstart = start; start 479 drivers/staging/android/ashmem.c loff_t start = range->pgstart * PAGE_SIZE; start 492 drivers/staging/android/ashmem.c start, end - start); start 778 drivers/staging/axis-fifo/axis-fifo.c if (!request_mem_region(fifo->mem->start, resource_size(fifo->mem), start 782 drivers/staging/axis-fifo/axis-fifo.c &fifo->mem->start); start 787 drivers/staging/axis-fifo/axis-fifo.c &fifo->mem->start, &fifo->mem->end); start 790 drivers/staging/axis-fifo/axis-fifo.c fifo->base_addr = ioremap(fifo->mem->start, resource_size(fifo->mem)); start 800 drivers/staging/axis-fifo/axis-fifo.c DRIVER_NAME, &fifo->mem->start); start 979 drivers/staging/axis-fifo/axis-fifo.c &fifo->mem->start); start 985 drivers/staging/axis-fifo/axis-fifo.c fifo->irq = r_irq->start; start 1032 drivers/staging/axis-fifo/axis-fifo.c &fifo->mem->start, &fifo->base_addr, fifo->irq, start 1048 drivers/staging/axis-fifo/axis-fifo.c release_mem_region(fifo->mem->start, resource_size(fifo->mem)); start 1066 drivers/staging/axis-fifo/axis-fifo.c release_mem_region(fifo->mem->start, resource_size(fifo->mem)); start 55 drivers/staging/board/armadillo800eva.c .start = 0xfe940000, start 60 drivers/staging/board/armadillo800eva.c .start = 177 + 32, start 32 drivers/staging/board/board.c if (res.start == base_address) { start 52 drivers/staging/board/board.c if (find_by_address(r->start)) start 77 drivers/staging/board/board.c unsigned int hwirq = res->start; start 110 drivers/staging/board/board.c res->start = virq; start 2305 drivers/staging/comedi/comedi_fops.c unsigned long start = vma->vm_start; start 2379 drivers/staging/comedi/comedi_fops.c retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE, start 2384 drivers/staging/comedi/comedi_fops.c start += PAGE_SIZE; start 1030 drivers/staging/comedi/comedidev.h unsigned long start, unsigned long len); start 1032 drivers/staging/comedi/comedidev.h unsigned long start, unsigned long len); start 875 drivers/staging/comedi/drivers.c unsigned long start, unsigned long len) start 877 drivers/staging/comedi/drivers.c if (!start) { start 884 drivers/staging/comedi/drivers.c if (!request_region(start, len, dev->board_name)) { start 886 drivers/staging/comedi/drivers.c dev->board_name, start, len); start 910 drivers/staging/comedi/drivers.c unsigned long start, unsigned long len) start 914 drivers/staging/comedi/drivers.c ret = __comedi_request_region(dev, start, len); start 916 drivers/staging/comedi/drivers.c dev->iobase = start; start 360 
drivers/staging/comedi/drivers/cb_das16_cs.c dev->iobase = link->resource[0]->start; start 366 drivers/staging/comedi/drivers/comedi_8254.c unsigned int start; start 383 drivers/staging/comedi/drivers/comedi_8254.c start = div / d2; start 384 drivers/staging/comedi/drivers/comedi_8254.c if (start < 2) start 385 drivers/staging/comedi/drivers/comedi_8254.c start = 2; start 386 drivers/staging/comedi/drivers/comedi_8254.c for (d1 = start; d1 <= div / d1 + 1 && d1 <= I8254_MAX_COUNT; d1++) { start 65 drivers/staging/comedi/drivers/das08_cs.c iobase = link->resource[0]->start; start 147 drivers/staging/comedi/drivers/gsc_hpdi.c unsigned int start; start 155 drivers/staging/comedi/drivers/gsc_hpdi.c start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr); start 157 drivers/staging/comedi/drivers/gsc_hpdi.c for (desc = 0; (next < start || next >= start + devpriv->block_size) && start 170 drivers/staging/comedi/drivers/gsc_hpdi.c start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr); start 220 drivers/staging/comedi/drivers/ni_daq_700.c dev->iobase = link->resource[0]->start; start 41 drivers/staging/comedi/drivers/ni_daq_dio24.c dev->iobase = link->resource[0]->start; start 69 drivers/staging/comedi/drivers/ni_labpc_cs.c dev->iobase = link->resource[0]->start; start 139 drivers/staging/comedi/drivers/ni_mio_cs.c p_dev->resource[0]->start = base; start 164 drivers/staging/comedi/drivers/ni_mio_cs.c dev->iobase = link->resource[0]->start; start 263 drivers/staging/comedi/drivers/quatech_daqp_cs.c int start) start 275 drivers/staging/comedi/drivers/quatech_daqp_cs.c if (start) start 500 drivers/staging/comedi/drivers/quatech_daqp_cs.c int start = (i == 0 || scanlist_start_on_every_entry); start 502 drivers/staging/comedi/drivers/quatech_daqp_cs.c daqp_ai_set_one_scanlist_entry(dev, cmd->chanlist[i], start); start 714 drivers/staging/comedi/drivers/quatech_daqp_cs.c dev->iobase = link->resource[0]->start; start 2816 drivers/staging/exfat/exfat_super.c loff_t start = i_size_read(inode), count = size - i_size_read(inode); start 2827 drivers/staging/exfat/exfat_super.c err = filemap_fdatawrite_range(mapping, start, start 2828 drivers/staging/exfat/exfat_super.c start + count - 1); start 2834 drivers/staging/exfat/exfat_super.c err = filemap_fdatawait_range(mapping, start, start 2835 drivers/staging/exfat/exfat_super.c start + count - 1); start 2460 drivers/staging/fwserial/fwserial.c .start = CSR_REGISTER_BASE + 0x1e0000ULL, start 453 drivers/staging/gasket/gasket_core.c min_addr = bar_desc.mappable_regions[i].start - start 455 drivers/staging/gasket/gasket_core.c max_addr = bar_desc.mappable_regions[i].start - start 774 drivers/staging/gasket/gasket_core.c ulong range_start = region->start; start 799 drivers/staging/gasket/gasket_core.c mappable_region->start = range_start; start 816 drivers/staging/gasket/gasket_core.c mappable_region->start = bar_offset; start 921 drivers/staging/gasket/gasket_core.c phys_offset = region_to_map.start; start 175 drivers/staging/gasket/gasket_core.h u64 start; start 394 drivers/staging/gasket/gasket_page_table.c int i, start; start 404 drivers/staging/gasket/gasket_page_table.c start = min(pg_tbl->num_simple_entries, num_simple_entries); start 406 drivers/staging/gasket/gasket_page_table.c for (i = start; i < pg_tbl->config.total_entries; i++) { start 300 drivers/staging/goldfish/goldfish_audio.c data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); start 387 drivers/staging/greybus/hid.c .start = gb_hid_start, start 847 
drivers/staging/greybus/tools/loopback_test.c start(t); start 147 drivers/staging/greybus/usb.c .start = hcd_start, start 132 drivers/staging/isdn/avm/avm_cs.c if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) { start 135 drivers/staging/isdn/avm/avm_cs.c (unsigned int) link->resource[0]->start, link->irq); start 146 drivers/staging/isdn/avm/avm_cs.c b1pcmcia_delcard(link->resource[0]->start, link->irq); start 182 drivers/staging/isdn/avm/b1isa.c if (isa_dev[i].resource[0].start) start 185 drivers/staging/isdn/avm/b1isa.c isa_dev[i].resource[0].start = data->port; start 217 drivers/staging/isdn/avm/b1isa.c isa_dev[i].resource[0].start = io[i]; start 236 drivers/staging/isdn/avm/b1isa.c if (isa_dev[i].resource[0].start) start 531 drivers/staging/isdn/avm/t1isa.c if (isa_dev[i].resource[0].start) start 534 drivers/staging/isdn/avm/t1isa.c isa_dev[i].resource[0].start = data->port; start 566 drivers/staging/isdn/avm/t1isa.c isa_dev[i].resource[0].start = io[i]; start 118 drivers/staging/kpc2000/kpc2000/cell_probe.c resources[0].start = cte.offset; start 122 drivers/staging/kpc2000/kpc2000/cell_probe.c resources[1].start = pcard->pdev->irq; start 356 drivers/staging/kpc2000/kpc2000/cell_probe.c resources[0].start = engine_regs_offset; start 360 drivers/staging/kpc2000/kpc2000/cell_probe.c resources[1].start = irq_num; start 360 drivers/staging/kpc2000/kpc2000/core.c pcard->regs_base_resource.start = reg_bar_phys_addr; start 391 drivers/staging/kpc2000/kpc2000/core.c pcard->dma_base_resource.start = dma_bar_phys_addr; start 593 drivers/staging/kpc2000/kpc2000_i2c.c res->start, start 469 drivers/staging/kpc2000/kpc2000_spi.c kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start, start 125 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c ldev->eng_regs = ioremap_nocache(r->start, resource_size(r)); start 138 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c ldev->irq = r->start; start 2920 drivers/staging/media/allegro-dvt/allegro-core.c regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); start 2939 drivers/staging/media/allegro-dvt/allegro-core.c sram_res->start, start 832 drivers/staging/media/imx/imx-media-utils.c find_pipeline_entity(struct media_entity *start, u32 grp_id, start 839 drivers/staging/media/imx/imx-media-utils.c if (grp_id && is_media_entity_v4l2_subdev(start)) { start 840 drivers/staging/media/imx/imx-media-utils.c sd = media_entity_to_v4l2_subdev(start); start 843 drivers/staging/media/imx/imx-media-utils.c } else if (buftype && is_media_entity_v4l2_video_device(start)) { start 844 drivers/staging/media/imx/imx-media-utils.c vfd = media_entity_to_video_device(start); start 849 drivers/staging/media/imx/imx-media-utils.c pad = imx_media_pipeline_pad(start, grp_id, buftype, upstream); start 44 drivers/staging/media/imx/imx-media-vdic.c void (*start)(struct vdic_priv *priv); start 347 drivers/staging/media/imx/imx-media-vdic.c .start = vdic_start_direct, start 354 drivers/staging/media/imx/imx-media-vdic.c .start = vdic_start_indirect, start 390 drivers/staging/media/imx/imx-media-vdic.c priv->ops->start(priv); start 622 drivers/staging/media/imx/imx6-mipi-csi2.c csi2->base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE); start 1900 drivers/staging/media/ipu3/ipu3-abi.h u8 start; /* index of the oldest element */ start 1716 drivers/staging/media/ipu3/ipu3-css-params.c static u16 imgu_css_grid_end(u16 start, u8 width, u8 block_width_log2) start 1718 drivers/staging/media/ipu3/ipu3-css-params.c return (start & IPU3_UAPI_GRID_START_MASK) + start 
1123 drivers/staging/media/ipu3/ipu3-css.c u8 size, start, end, end2; start 1127 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->host2sp_bufq_info[thread][queue].start); start 1131 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->host2sp_evtq_info.start); start 1139 drivers/staging/media/ipu3/ipu3-css.c if (end2 == start) start 1161 drivers/staging/media/ipu3/ipu3-css.c u8 size, start, end, start2; start 1165 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->sp2host_bufq_info[queue].start); start 1169 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->sp2host_evtq_info.start); start 1176 drivers/staging/media/ipu3/ipu3-css.c if (end == start) start 1179 drivers/staging/media/ipu3/ipu3-css.c start2 = (start + 1) % size; start 1182 drivers/staging/media/ipu3/ipu3-css.c *data = readl(&q->sp2host_bufq[queue][start]); start 1183 drivers/staging/media/ipu3/ipu3-css.c writeb(start2, &q->sp2host_bufq_info[queue].start); start 1187 drivers/staging/media/ipu3/ipu3-css.c *data = readl(&q->sp2host_evtq[start]); start 1188 drivers/staging/media/ipu3/ipu3-css.c writeb(start2, &q->sp2host_evtq_info.start); start 203 drivers/staging/media/meson/vdec/codec_mpeg12.c .start = codec_mpeg12_start, start 91 drivers/staging/media/meson/vdec/vdec.c ret = vdec_ops->start(sess); start 98 drivers/staging/media/meson/vdec/vdec.h int (*start)(struct amvdec_session *sess); start 122 drivers/staging/media/meson/vdec/vdec.h int (*start)(struct amvdec_session *sess); start 199 drivers/staging/media/meson/vdec/vdec_1.c ret = codec_ops->start(sess); start 226 drivers/staging/media/meson/vdec/vdec_1.c .start = vdec_1_start, start 383 drivers/staging/media/omap4iss/iss_video.c unsigned int start; start 395 drivers/staging/media/omap4iss/iss_video.c start = iss_pipeline_ready(pipe); start 396 drivers/staging/media/omap4iss/iss_video.c if (start) start 400 drivers/staging/media/omap4iss/iss_video.c if (start) start 121 drivers/staging/media/sunxi/cedrus/cedrus.h int (*start)(struct cedrus_ctx *ctx); start 577 drivers/staging/media/sunxi/cedrus/cedrus_h264.c .start = cedrus_h264_start, start 461 drivers/staging/media/sunxi/cedrus/cedrus_video.c dev->dec_ops[ctx->current_codec]->start) start 462 drivers/staging/media/sunxi/cedrus/cedrus_video.c ret = dev->dec_ops[ctx->current_codec]->start(ctx); start 836 drivers/staging/most/dim2/dim2.c if (sizeof(res->start) == sizeof(long long)) start 838 drivers/staging/most/dim2/dim2.c else if (sizeof(res->start) == sizeof(long)) start 843 drivers/staging/most/dim2/dim2.c snprintf(dev->name, sizeof(dev->name), fmt, res->start); start 271 drivers/staging/mt7621-pci/pci-mt7621.c mask = ~(mem_resource->end - mem_resource->start); start 273 drivers/staging/mt7621-pci/pci-mt7621.c write_gcr_reg1_base(mem_resource->start); start 317 drivers/staging/mt7621-pci/pci-mt7621.c pcie->busn.start = 0; start 636 drivers/staging/mt7621-pci/pci-mt7621.c host->busnr = pcie->busn.start; start 682 drivers/staging/mt7621-pci/pci-mt7621.c iomem_resource.start = 0; start 684 drivers/staging/mt7621-pci/pci-mt7621.c ioport_resource.start = 0; start 54 drivers/staging/netlogic/platform_net.c res->start = CPHYSADDR(nlm_mmio_base(offset)); start 55 drivers/staging/netlogic/platform_net.c res->end = res->start + 0xfff; start 60 drivers/staging/netlogic/platform_net.c res->start = irq; start 996 drivers/staging/netlogic/xlr_net.c ndev->irq = res->start; start 112 drivers/staging/nvec/nvec_ps2.c ser_dev->start = ps2_startstreaming; start 3516 drivers/staging/octeon-usb/octeon-hcd.c .start = 
octeon_usb_start, start 3594 drivers/staging/octeon-usb/octeon-hcd.c usb_num = (res_mem->start >> 44) & 1; start 12 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_drv_version(char *page, char **start, start 24 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_write_reg(char *page, char **start, start 73 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_read_reg(char *page, char **start, start 133 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_adapter_state(char *page, char **start, start 148 drivers/staging/rtl8188eu/core/rtw_debug.c int proc_get_best_channel(char *page, char **start, start 211 drivers/staging/rtl8188eu/core/rtw_efuse.c unsigned long start = 0; start 233 drivers/staging/rtl8188eu/core/rtw_efuse.c start = jiffies; start 235 drivers/staging/rtl8188eu/core/rtw_efuse.c jiffies_to_msecs(jiffies - start) < 1000) { start 776 drivers/staging/rtl8188eu/core/rtw_ieee80211.c enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len, start 781 drivers/staging/rtl8188eu/core/rtw_ieee80211.c u8 *pos = start; start 697 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c unsigned long start = jiffies; start 722 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 727 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1260 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c unsigned long start = jiffies; start 1291 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1296 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1389 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c unsigned long start = jiffies; start 1420 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1425 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1504 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c unsigned long start = jiffies; start 1528 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1533 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1867 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c unsigned long start = jiffies; start 1884 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 1887 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c u32 passing_time = jiffies_to_msecs(jiffies - start); start 1892 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c jiffies_to_msecs(jiffies - start)); start 548 drivers/staging/rtl8188eu/core/rtw_pwrctrl.c unsigned long start; start 555 drivers/staging/rtl8188eu/core/rtw_pwrctrl.c start = jiffies; start 559 drivers/staging/rtl8188eu/core/rtw_pwrctrl.c jiffies_to_msecs(jiffies - start) <= 3000) start 43 drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c unsigned long start = 0; start 49 drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c start = jiffies; start 51 drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c jiffies_to_msecs(jiffies - start) < 1000) { start 1084 drivers/staging/rtl8188eu/hal/usb_halinit.c unsigned long start = jiffies; start 1091 drivers/staging/rtl8188eu/hal/usb_halinit.c jiffies_to_msecs(jiffies - start)); start 718 drivers/staging/rtl8188eu/include/ieee80211.h enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len, start 107 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_drv_version(char *page, char **start, start 111 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_write_reg(char *page, char 
**start, start 117 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_read_reg(char *page, char **start, start 124 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_adapter_state(char *page, char **start, start 128 drivers/staging/rtl8188eu/include/rtw_debug.h int proc_get_best_channel(char *page, char **start, start 94 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c char *start, char *stop) start 114 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); start 120 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.ssid.ssid); start 164 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); start 178 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN); start 189 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); start 198 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.ssid.ssid); start 228 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN); start 239 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c return start; start 254 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 259 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie); start 269 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 274 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie); start 292 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr); start 314 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); start 315 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c return start; start 700 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c start: start 837 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c goto start; start 77 drivers/staging/rtl8192e/rtllib.h #define iwe_stream_add_event_rsl(info, start, stop, iwe, len) \ start 78 drivers/staging/rtl8192e/rtllib.h iwe_stream_add_event(info, start, stop, iwe, len) start 80 drivers/staging/rtl8192e/rtllib.h #define iwe_stream_add_point_rsl(info, start, stop, iwe, p) \ start 81 drivers/staging/rtl8192e/rtllib.h iwe_stream_add_point(info, start, stop, iwe, p) start 1778 drivers/staging/rtl8192e/rtllib.h bool start); start 35 drivers/staging/rtl8192e/rtllib_wx.c char *start, char *stop, start 52 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_event_rsl(info, start, stop, start 61 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, &iwe, start 65 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, start 69 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, &iwe, start 83 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_event_rsl(info, start, stop, start 93 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_event_rsl(info, start, stop, start 102 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_event_rsl(info, start, 
stop, &iwe, start 112 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, start 163 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, start 168 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, start 184 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, start 191 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, start 201 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, &iwe, buf); start 210 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, &iwe, buf); start 221 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, &iwe, buf); start 234 drivers/staging/rtl8192e/rtllib_wx.c start = iwe_stream_add_point_rsl(info, start, stop, start 237 drivers/staging/rtl8192e/rtllib_wx.c return start; start 40 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c char *start, char *stop, start 57 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN); start 66 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>"); start 69 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid); start 81 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN); start 90 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN); start 100 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN); start 108 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid); start 154 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, start 159 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, custom); start 174 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN); start 180 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, custom); start 194 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 209 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 221 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c start = iwe_stream_add_point(info, start, stop, &iwe, custom); start 223 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c return start; start 130 drivers/staging/rtl8712/rtl871x_ioctl_linux.c char *start, char *stop) start 153 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, start 158 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, start 173 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, start 178 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, iwe, start 182 drivers/staging/rtl8712/rtl871x_ioctl_linux.c return start; start 188 
drivers/staging/rtl8712/rtl871x_ioctl_linux.c char *start, char *stop) start 200 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, start 205 drivers/staging/rtl8712/rtl871x_ioctl_linux.c return start; start 211 drivers/staging/rtl8712/rtl871x_ioctl_linux.c char *start, char *stop) start 232 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); start 237 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, start 265 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); start 276 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, start 295 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, start 305 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, start 308 drivers/staging/rtl8712/rtl871x_ioctl_linux.c current_val = start + iwe_stream_lcp_len(info); start 318 drivers/staging/rtl8712/rtl871x_ioctl_linux.c current_val = iwe_stream_add_value(info, start, current_val, start 322 drivers/staging/rtl8712/rtl871x_ioctl_linux.c if ((current_val - start) > iwe_stream_lcp_len(info)) start 323 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = current_val; start 325 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = translate_scan_wpa(info, pnetwork, &iwe, start, stop); start 327 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = translate_scan_wps(info, pnetwork, &iwe, start, stop); start 338 drivers/staging/rtl8712/rtl871x_ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); start 340 drivers/staging/rtl8712/rtl871x_ioctl_linux.c return start; start 1700 drivers/staging/rtl8723bs/core/rtw_cmd.c unsigned long start = jiffies; start 1709 drivers/staging/rtl8723bs/core/rtw_cmd.c while (false == empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) { start 232 drivers/staging/rtl8723bs/core/rtw_ieee80211.c u8 *start; start 238 drivers/staging/rtl8723bs/core/rtw_ieee80211.c start = ies + offset; start 242 drivers/staging/rtl8723bs/core/rtw_ieee80211.c target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen); start 246 drivers/staging/rtl8723bs/core/rtw_ieee80211.c uint remain_len = search_len - (remain_ies - start); start 253 drivers/staging/rtl8723bs/core/rtw_ieee80211.c start = target_ie; start 963 drivers/staging/rtl8723bs/core/rtw_ieee80211.c ParseRes rtw_ieee802_11_parse_elems(u8 *start, uint len, start 968 drivers/staging/rtl8723bs/core/rtw_ieee80211.c u8 *pos = start; start 1139 drivers/staging/rtl8723bs/core/rtw_mlme.c unsigned long start; start 1143 drivers/staging/rtl8723bs/core/rtw_mlme.c start = jiffies; start 1146 drivers/staging/rtl8723bs/core/rtw_mlme.c && jiffies_to_msecs(start) <= 200) { start 4233 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c unsigned long start = jiffies; start 4254 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c DBG_871X("%s fail! 
%u ms\n", __func__, jiffies_to_msecs(jiffies - start)); start 4257 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c unsigned long passing_time = jiffies_to_msecs(jiffies - start); start 1207 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c unsigned long start = jiffies; start 1223 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c while (pwrpriv->ps_processing && jiffies_to_msecs(jiffies - start) <= 3000) start 1234 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c && jiffies_to_msecs(jiffies - start) <= 3000 start 785 drivers/staging/rtl8723bs/core/rtw_security.c static unsigned long start; start 792 drivers/staging/rtl8723bs/core/rtw_security.c if (start == 0) start 793 drivers/staging/rtl8723bs/core/rtw_security.c start = jiffies; start 800 drivers/staging/rtl8723bs/core/rtw_security.c if (jiffies_to_msecs(jiffies - start) > 1000) { start 805 drivers/staging/rtl8723bs/core/rtw_security.c start = jiffies; start 816 drivers/staging/rtl8723bs/core/rtw_security.c start = 0; start 1875 drivers/staging/rtl8723bs/core/rtw_security.c static unsigned long start; start 1884 drivers/staging/rtl8723bs/core/rtw_security.c if (start == 0) start 1885 drivers/staging/rtl8723bs/core/rtw_security.c start = jiffies; start 1892 drivers/staging/rtl8723bs/core/rtw_security.c if (jiffies_to_msecs(jiffies - start) > 1000) { start 1897 drivers/staging/rtl8723bs/core/rtw_security.c start = jiffies; start 1909 drivers/staging/rtl8723bs/core/rtw_security.c start = 0; start 2289 drivers/staging/rtl8723bs/core/rtw_xmit.c static unsigned long start; start 2299 drivers/staging/rtl8723bs/core/rtw_xmit.c if (start == 0) start 2300 drivers/staging/rtl8723bs/core/rtw_xmit.c start = jiffies; start 2304 drivers/staging/rtl8723bs/core/rtw_xmit.c if (jiffies_to_msecs(jiffies - start) > 2000) { start 2307 drivers/staging/rtl8723bs/core/rtw_xmit.c start = jiffies; start 32 drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c u32 start = 0, func_start = 0, func_end = 0; start 60 drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c start = jiffies; start 110 drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c if ((valid_done == max_rf_path) || (jiffies_to_msecs(jiffies - start) > max_time)) { start 228 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c unsigned long start = jiffies; start 238 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c } while (jiffies_to_msecs(jiffies-start) < timeout_ms || cnt < min_cnt); start 258 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c jiffies_to_msecs(jiffies-start), start 271 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c unsigned long start = jiffies; start 288 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c } while (jiffies_to_msecs(jiffies - start) < timeout_ms || cnt < min_cnt); start 308 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c jiffies_to_msecs(jiffies-start), start 2321 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c unsigned long start, passing_time; start 2332 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c start = jiffies; start 2341 drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c passing_time = jiffies_to_msecs(jiffies - start); start 1367 drivers/staging/rtl8723bs/hal/sdio_halinit.c unsigned long start; start 1382 drivers/staging/rtl8723bs/hal/sdio_halinit.c start = jiffies; start 1395 drivers/staging/rtl8723bs/hal/sdio_halinit.c MSG_8192C("<==== _ReadAdapterInfo8723BS in %d ms\n", jiffies_to_msecs(jiffies - start)); start 1114 drivers/staging/rtl8723bs/include/ieee80211.h ParseRes rtw_ieee802_11_parse_elems(u8 *start, uint len, start 82 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c char 
*start, char *stop) start 102 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); start 108 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid); start 153 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); start 172 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN); start 183 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); start 192 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid); start 198 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c return start; start 226 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN); start 240 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c return start; start 258 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 263 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie); start 274 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 279 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie); start 305 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr); start 364 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); start 379 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c start = iwe_stream_add_point(info, start, stop, &iwe, buf); start 385 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c return start; start 2280 drivers/staging/rts5208/ms.c u16 start, end, phy_blk, log_blk, tmp_blk, idx; start 2318 drivers/staging/rts5208/ms.c start = (u16)seg_no << 9; start 2327 drivers/staging/rts5208/ms.c for (phy_blk = start; phy_blk < end; phy_blk++) { start 1639 drivers/staging/rts5208/sd.c path[j].start = i; start 1650 drivers/staging/rts5208/sd.c path[idx].start + 1; start 1651 drivers/staging/rts5208/sd.c path[idx].mid = path[idx].start + start 1663 drivers/staging/rts5208/sd.c path[idx].len = path[idx].end - path[idx].start + 1; start 1664 drivers/staging/rts5208/sd.c path[idx].mid = path[idx].start + path[idx].len / 2; start 1667 drivers/staging/rts5208/sd.c if ((path[0].start == 0) && start 1669 drivers/staging/rts5208/sd.c path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1; start 1671 drivers/staging/rts5208/sd.c path[0].mid = path[0].start + path[0].len / 2; start 1689 drivers/staging/rts5208/sd.c i, path[i].start); start 259 drivers/staging/rts5208/sd.h int start; start 1298 drivers/staging/rts5208/xd.c u32 start, end, i; start 1335 drivers/staging/rts5208/xd.c start = 0; start 1337 drivers/staging/rts5208/xd.c start = xd_card->cis_block + 1; start 1346 drivers/staging/rts5208/xd.c start = (u32)(zone_no) << 10; start 1352 drivers/staging/rts5208/xd.c start, end); start 1358 drivers/staging/rts5208/xd.c for (i = start; i < end; i++) { start 1453 drivers/staging/rts5208/xd.c for (start = 0; start < end; start++) { start 1454 drivers/staging/rts5208/xd.c if (zone->l2p_table[start] == 0xFFFF) start 912 
drivers/staging/sm750fb/sm750.c info->cmap.start, info->cmap.len, start 923 drivers/staging/sm750fb/sm750.c info->cmap.start, info->cmap.len, start 358 drivers/staging/speakup/i18n.c .start = MSG_CTL_START, start 363 drivers/staging/speakup/i18n.c .start = MSG_COLORS_START, start 368 drivers/staging/speakup/i18n.c .start = MSG_FORMATTED_START, start 373 drivers/staging/speakup/i18n.c .start = MSG_FUNCNAMES_START, start 378 drivers/staging/speakup/i18n.c .start = MSG_KEYNAMES_START, start 383 drivers/staging/speakup/i18n.c .start = MSG_ANNOUNCEMENTS_START, start 388 drivers/staging/speakup/i18n.c .start = MSG_STATES_START, start 596 drivers/staging/speakup/i18n.c for (i = group->start; i <= group->end; i++) { start 224 drivers/staging/speakup/i18n.h enum msg_index_t start; start 116 drivers/staging/speakup/keyhelp.c char start = SPACE; start 124 drivers/staging/speakup/keyhelp.c if (start == *cur_funcname) start 126 drivers/staging/speakup/keyhelp.c start = *cur_funcname; start 127 drivers/staging/speakup/keyhelp.c letter_offsets[(start & 31) - 1] = i; start 766 drivers/staging/speakup/kobjects.c enum msg_index_t firstmessage = group->start; start 850 drivers/staging/speakup/kobjects.c retval = message_show_helper(buf, group->start, group->end); start 849 drivers/staging/speakup/main.c u_long start = vc->vc_origin + (spk_y * vc->vc_size_row); start 850 drivers/staging/speakup/main.c u_long end = start + (to * 2); start 852 drivers/staging/speakup/main.c start += from * 2; start 853 drivers/staging/speakup/main.c if (say_from_to(vc, start, end, read_punc) <= 0) start 884 drivers/staging/speakup/main.c u_long start, end; start 892 drivers/staging/speakup/main.c start = vc->vc_origin + ((spk_y) * vc->vc_size_row); start 899 drivers/staging/speakup/main.c spk_attr = get_attributes(vc, (u_short *)start); start 901 drivers/staging/speakup/main.c while (start < end) { start 902 drivers/staging/speakup/main.c sentbuf[bn][i] = get_char(vc, (u_short *)start, &tmp); start 914 drivers/staging/speakup/main.c start += 2; start 935 drivers/staging/speakup/main.c u_long start = vc->vc_origin, end; start 938 drivers/staging/speakup/main.c start += from * vc->vc_size_row; start 942 drivers/staging/speakup/main.c for (from = start; from < end; from = to) { start 955 drivers/staging/speakup/main.c u_long start, end, from, to; start 961 drivers/staging/speakup/main.c start = vc->vc_origin + (win_top * vc->vc_size_row); start 963 drivers/staging/speakup/main.c while (start <= end) { start 964 drivers/staging/speakup/main.c from = start + (win_left * 2); start 965 drivers/staging/speakup/main.c to = start + (win_right * 2); start 967 drivers/staging/speakup/main.c start += vc->vc_size_row; start 1612 drivers/staging/speakup/main.c u16 *start = (u16 *)vc->vc_origin; start 1618 drivers/staging/speakup/main.c u16 *end = start + vc->vc_cols * 2; start 1621 drivers/staging/speakup/main.c for (ptr = start; ptr < end; ptr++) { start 1626 drivers/staging/speakup/main.c start += vc->vc_size_row; start 52 drivers/staging/speakup/speakup.h char *spk_s2uchar(char *start, char *dest); start 307 drivers/staging/speakup/speakup_acntpc.c module_param_named(start, synth_acntpc.startup, short, 0444); start 310 drivers/staging/speakup/speakup_acntpc.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 131 drivers/staging/speakup/speakup_acntsa.c module_param_named(start, synth_acntsa.startup, short, 0444); start 135 drivers/staging/speakup/speakup_acntsa.c MODULE_PARM_DESC(start, "Start the synthesizer once 
it is loaded."); start 195 drivers/staging/speakup/speakup_apollo.c module_param_named(start, synth_apollo.startup, short, 0444); start 199 drivers/staging/speakup/speakup_apollo.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 158 drivers/staging/speakup/speakup_audptr.c module_param_named(start, synth_audptr.startup, short, 0444); start 162 drivers/staging/speakup/speakup_audptr.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 115 drivers/staging/speakup/speakup_bns.c module_param_named(start, synth_bns.startup, short, 0444); start 119 drivers/staging/speakup/speakup_bns.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 223 drivers/staging/speakup/speakup_decext.c module_param_named(start, synth_decext.startup, short, 0444); start 227 drivers/staging/speakup/speakup_decext.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 481 drivers/staging/speakup/speakup_decpc.c module_param_named(start, synth_dec_pc.startup, short, 0444); start 483 drivers/staging/speakup/speakup_decpc.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 295 drivers/staging/speakup/speakup_dectlk.c module_param_named(start, synth_dectlk.startup, short, 0444); start 299 drivers/staging/speakup/speakup_dectlk.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 378 drivers/staging/speakup/speakup_dtlk.c module_param_named(start, synth_dtlk.startup, short, 0444); start 381 drivers/staging/speakup/speakup_dtlk.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 118 drivers/staging/speakup/speakup_dummy.c module_param_named(start, synth_dummy.startup, short, 0444); start 122 drivers/staging/speakup/speakup_dummy.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 307 drivers/staging/speakup/speakup_keypc.c module_param_named(start, synth_keypc.startup, short, 0444); start 310 drivers/staging/speakup/speakup_keypc.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 162 drivers/staging/speakup/speakup_ltlk.c module_param_named(start, synth_ltlk.startup, short, 0444); start 166 drivers/staging/speakup/speakup_ltlk.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 417 drivers/staging/speakup/speakup_soft.c module_param_named(start, synth_soft.startup, short, 0444); start 419 drivers/staging/speakup/speakup_soft.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 126 drivers/staging/speakup/speakup_spkout.c module_param_named(start, synth_spkout.startup, short, 0444); start 130 drivers/staging/speakup/speakup_spkout.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 114 drivers/staging/speakup/speakup_txprt.c module_param_named(start, synth_txprt.startup, short, 0444); start 118 drivers/staging/speakup/speakup_txprt.c MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); start 73 drivers/staging/speakup/spk_priv.h int synth_request_region(unsigned long start, unsigned long n); start 74 drivers/staging/speakup/spk_priv.h int synth_release_region(unsigned long start, unsigned long n); start 327 drivers/staging/speakup/synth.c int synth_request_region(unsigned long start, unsigned long n) start 333 drivers/staging/speakup/synth.c synth_res.start = start; start 334 drivers/staging/speakup/synth.c synth_res.end = start + n - 1; start 340 drivers/staging/speakup/synth.c int synth_release_region(unsigned long 
start, unsigned long n) start 328 drivers/staging/speakup/varhandlers.c char *spk_s2uchar(char *start, char *dest) start 333 drivers/staging/speakup/varhandlers.c val = simple_strtoul(skip_spaces(start), &start, 10); start 334 drivers/staging/speakup/varhandlers.c if (*start == ',') start 335 drivers/staging/speakup/varhandlers.c start++; start 337 drivers/staging/speakup/varhandlers.c return start; start 831 drivers/staging/uwb/hwa-rc.c uwb_rc->start = hwarc_neep_init; start 347 drivers/staging/uwb/lc-rc.c result = rc->start(rc); start 367 drivers/staging/uwb/reset.c ret = rc->start(rc); start 364 drivers/staging/uwb/uwb.h int (*start)(struct uwb_rc *rc); start 210 drivers/staging/uwb/whc-rc.c whcrc->area = umc_dev->resource.start; start 380 drivers/staging/uwb/whc-rc.c uwb_rc->start = whcrc_start_rc; start 111 drivers/staging/uwb/whci.c umc->resource.start = pci_resource_start(card->pci, bar) start 113 drivers/staging/uwb/whci.c umc->resource.end = umc->resource.start start 91 drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h struct vc_audio_start start; start 1622 drivers/staging/vt6655/device_main.c .start = vnt_start, start 937 drivers/staging/vt6656/main_usb.c .start = vnt_start, start 1040 drivers/staging/wilc1000/wilc_wlan.c int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer, start 1049 drivers/staging/wilc1000/wilc_wlan.c if (start) start 1081 drivers/staging/wilc1000/wilc_wlan.c int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit, start 1090 drivers/staging/wilc1000/wilc_wlan.c if (start) start 285 drivers/staging/wilc1000/wilc_wlan.h int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer, start 287 drivers/staging/wilc1000/wilc_wlan.h int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit, start 610 drivers/staging/wusbcore/host/hwa-hc.c .start = hwahc_op_start, start 729 drivers/staging/wusbcore/host/hwa-hc.c wusbhc->start = __hwahc_op_wusbhc_start; start 213 drivers/staging/wusbcore/host/whci/hcd.c .start = whc_start, start 266 drivers/staging/wusbcore/host/whci/hcd.c wusbhc->start = whc_wusbhc_start; start 50 drivers/staging/wusbcore/host/whci/init.c resource_size_t start, len; start 74 drivers/staging/wusbcore/host/whci/init.c start = whc->umc->resource.start; start 75 drivers/staging/wusbcore/host/whci/init.c len = whc->umc->resource.end - start + 1; start 76 drivers/staging/wusbcore/host/whci/init.c if (!request_mem_region(start, len, "whci-hc")) { start 81 drivers/staging/wusbcore/host/whci/init.c whc->base_phys = start; start 82 drivers/staging/wusbcore/host/whci/init.c whc->base = ioremap(start, len); start 155 drivers/staging/wusbcore/mmc.c ret = wusbhc->start(wusbhc); start 264 drivers/staging/wusbcore/wusbhc.h int (*start)(struct wusbhc *wusbhc); start 319 drivers/target/iscsi/cxgbit/cxgbit_ddp.c lldi->vr->iscsi.start, 2, start 320 drivers/target/iscsi/cxgbit/cxgbit_ddp.c lldi->vr->ppod_edram.start, start 1044 drivers/target/iscsi/iscsi_target_nego.c char *tmpbuf, *start = NULL, *end = NULL, *key, *value; start 1069 drivers/target/iscsi/iscsi_target_nego.c start = tmpbuf; start 1070 drivers/target/iscsi/iscsi_target_nego.c end = (start + payload_length); start 1076 drivers/target/iscsi/iscsi_target_nego.c while (start < end) { start 1077 drivers/target/iscsi/iscsi_target_nego.c if (iscsi_extract_key_value(start, &key, &value) < 0) { start 1089 drivers/target/iscsi/iscsi_target_nego.c start += strlen(key) + strlen(value) + 2; start 1358 
drivers/target/iscsi/iscsi_target_parameters.c char *tmpbuf, *start = NULL, *end = NULL; start 1368 drivers/target/iscsi/iscsi_target_parameters.c start = tmpbuf; start 1369 drivers/target/iscsi/iscsi_target_parameters.c end = (start + length); start 1371 drivers/target/iscsi/iscsi_target_parameters.c while (start < end) { start 1375 drivers/target/iscsi/iscsi_target_parameters.c if (iscsi_extract_key_value(start, &key, &value) < 0) start 1393 drivers/target/iscsi/iscsi_target_parameters.c start += strlen(key) + strlen(value) + 2; start 1399 drivers/target/iscsi/iscsi_target_parameters.c start += strlen(key) + strlen(value) + 2; start 32 drivers/target/sbp/sbp_target.c .start = CSR_REGISTER_BASE + 0x10000, start 397 drivers/target/target_core_file.c loff_t start, end; start 411 drivers/target/target_core_file.c start = 0; start 414 drivers/target/target_core_file.c start = cmd->t_task_lba * dev->dev_attrib.block_size; start 416 drivers/target/target_core_file.c end = start + cmd->data_length - 1; start 421 drivers/target/target_core_file.c ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); start 650 drivers/target/target_core_file.c loff_t start = cmd->t_task_lba * start 655 drivers/target/target_core_file.c end = start + cmd->data_length - 1; start 659 drivers/target/target_core_file.c vfs_fsync_range(fd_dev->fd_file, start, end, 1); start 1376 drivers/target/target_core_sbc.c sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, start 1382 drivers/target/target_core_sbc.c sector_t sector = start; start 1389 drivers/target/target_core_sbc.c for (; psg && sector < start + sectors; psg = sg_next(psg)) { start 1394 drivers/target/target_core_sbc.c sector < start + sectors; start 599 drivers/target/target_core_user.c void *start = vaddr - offset; start 604 drivers/target/target_core_user.c flush_dcache_page(virt_to_page(start)); start 605 drivers/target/target_core_user.c start += PAGE_SIZE; start 1619 drivers/target/target_core_user.c int start, int end) start 1624 drivers/target/target_core_user.c for (i = start; i < end; i++) { start 2639 drivers/target/target_core_user.c u32 start, end, block, total_freed = 0; start 2673 drivers/target/target_core_user.c udev->dbi_thresh = start = 0; start 2676 drivers/target/target_core_user.c udev->dbi_thresh = start = block + 1; start 2681 drivers/target/target_core_user.c off = udev->data_off + start * DATA_BLOCK_SIZE; start 2685 drivers/target/target_core_user.c tcmu_blocks_release(&udev->data_blocks, start, end); start 2688 drivers/target/target_core_user.c total_freed += end - start; start 2689 drivers/target/target_core_user.c pr_debug("Freed %u blocks (total %u) from %s.\n", end - start, start 199 drivers/target/target_core_xcopy.c unsigned short start = 0; start 223 drivers/target/target_core_xcopy.c while (start < tdll) { start 235 drivers/target/target_core_xcopy.c start += XCOPY_TARGET_DESC_LEN; start 331 drivers/target/target_core_xcopy.c unsigned int start = 0; start 350 drivers/target/target_core_xcopy.c while (start < sdll) { start 361 drivers/target/target_core_xcopy.c start += XCOPY_SEGMENT_DESC_LEN; start 119 drivers/tc/tc.c tdev->resource.start = slotaddr; start 122 drivers/tc/tc.c tdev->resource.start = extslotaddr; start 170 drivers/tc/tc.c tc_bus.resource[0].start = tc_bus.slot_base; start 182 drivers/tc/tc.c tc_bus.resource[1].start = tc_bus.ext_slot_base; start 552 drivers/tee/optee/call.c static int check_mem_type(unsigned long start, size_t num_pages) start 561 drivers/tee/optee/call.c if 
(virt_addr_valid(start)) start 565 drivers/tee/optee/call.c rc = __check_mem_type(find_vma(mm, start), start 566 drivers/tee/optee/call.c start + num_pages * PAGE_SIZE); start 574 drivers/tee/optee/call.c unsigned long start) start 585 drivers/tee/optee/call.c rc = check_mem_type(start, num_pages); start 649 drivers/tee/optee/call.c unsigned long start) start 655 drivers/tee/optee/call.c return check_mem_type(start, num_pages); start 469 drivers/tee/optee/core.c begin = roundup(res.result.start, PAGE_SIZE); start 470 drivers/tee/optee/core.c end = rounddown(res.result.start + res.result.size, PAGE_SIZE); start 158 drivers/tee/optee/optee_private.h unsigned long start); start 163 drivers/tee/optee/optee_private.h unsigned long start); start 181 drivers/tee/optee/optee_smc.h unsigned long start; start 952 drivers/tee/tee_core.c tee_client_open_context(struct tee_context *start, start 963 drivers/tee/tee_core.c if (start) start 964 drivers/tee/tee_core.c dev = &start->teedev->dev; start 231 drivers/tee/tee_shm.c unsigned long start; start 258 drivers/tee/tee_shm.c start = rounddown(addr, PAGE_SIZE); start 259 drivers/tee/tee_shm.c shm->offset = addr - start; start 261 drivers/tee/tee_shm.c num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE; start 268 drivers/tee/tee_shm.c rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages); start 288 drivers/tee/tee_shm.c shm->num_pages, start); start 63 drivers/thermal/broadcom/sr-thermal.c sr_thermal->regs = (void __iomem *)devm_memremap(&pdev->dev, res->start, start 535 drivers/thermal/rcar_thermal.c ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, start 888 drivers/thermal/samsung/exynos_tmu.c data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res)); start 945 drivers/thermal/samsung/exynos_tmu.c if (res.start == EXYNOS5433_G3D_BASE) start 987 drivers/thermal/samsung/exynos_tmu.c data->base_second = devm_ioremap(&pdev->dev, res.start, start 449 drivers/thunderbolt/domain.c if (tb->cm_ops->start) { start 450 drivers/thunderbolt/domain.c ret = tb->cm_ops->start(tb); start 2078 drivers/thunderbolt/icm.c .start = icm_start, start 2094 drivers/thunderbolt/icm.c .start = icm_start, start 2116 drivers/thunderbolt/icm.c .start = icm_start, start 2138 drivers/thunderbolt/icm.c .start = icm_start, start 33 drivers/thunderbolt/lc.c int start, phys, ret, size; start 41 drivers/thunderbolt/lc.c start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; start 45 drivers/thunderbolt/lc.c return sw->cap_lc + start + phys * size; start 146 drivers/thunderbolt/lc.c int start, size, nlc, ret, i; start 158 drivers/thunderbolt/lc.c start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; start 163 drivers/thunderbolt/lc.c unsigned int offset = sw->cap_lc + start + i * size; start 765 drivers/thunderbolt/switch.c struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, start 771 drivers/thunderbolt/switch.c return start; start 779 drivers/thunderbolt/switch.c if (start->sw->config.depth < end->sw->config.depth) { start 770 drivers/thunderbolt/tb.c .start = tb_start, start 270 drivers/thunderbolt/tb.h int (*start)(struct tb *tb); start 602 drivers/thunderbolt/tb.h struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, start 1576 drivers/tty/amiserial.c .start = rs_start, start 4002 drivers/tty/cyclades.c .start = cy_start, start 310 drivers/tty/goldfish.c base = ioremap(r->start, 0x1000); start 322 drivers/tty/goldfish.c irq = r->start; start 1326 
drivers/tty/hvc/hvc_iucv.c void *start, *end; start 1334 drivers/tty/hvc/hvc_iucv.c start = hvc_iucv_filter + (8 * index); start 1335 drivers/tty/hvc/hvc_iucv.c end = memchr(start, ' ', 8); start 1336 drivers/tty/hvc/hvc_iucv.c len = (end) ? end - start : 8; start 1337 drivers/tty/hvc/hvc_iucv.c memcpy(buffer + rc, start, len); start 95 drivers/tty/ipwireless/main.c if (!request_region(p_dev->resource[0]->start, start 115 drivers/tty/ipwireless/main.c ipw->common_memory = ioremap(p_dev->resource[2]->start, start 121 drivers/tty/ipwireless/main.c if (!request_mem_region(p_dev->resource[2]->start, start 139 drivers/tty/ipwireless/main.c ipw->attr_memory = ioremap(p_dev->resource[3]->start, start 145 drivers/tty/ipwireless/main.c if (!request_mem_region(p_dev->resource[3]->start, start 157 drivers/tty/ipwireless/main.c release_mem_region(p_dev->resource[2]->start, start 162 drivers/tty/ipwireless/main.c release_region(p_dev->resource[0]->start, start 184 drivers/tty/ipwireless/main.c ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start, start 226 drivers/tty/ipwireless/main.c release_mem_region(link->resource[2]->start, start 231 drivers/tty/ipwireless/main.c release_mem_region(link->resource[3]->start, start 241 drivers/tty/ipwireless/main.c release_region(ipw->link->resource[0]->start, start 244 drivers/tty/ipwireless/main.c release_mem_region(ipw->link->resource[2]->start, start 249 drivers/tty/ipwireless/main.c release_mem_region(ipw->link->resource[3]->start, start 1256 drivers/tty/isicom.c .start = isicom_start, start 901 drivers/tty/mips_ejtag_fdc.c priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start, start 403 drivers/tty/moxa.c .start = moxa_start, start 2325 drivers/tty/mxser.c .start = mxser_start, start 545 drivers/tty/pty.c .start = pty_start, start 794 drivers/tty/pty.c .start = pty_start, start 2322 drivers/tty/rocket.c .start = rp_start, start 331 drivers/tty/serial/8250/8250_aspeed_vuart.c port.port.mapbase = res->start; start 355 drivers/tty/serial/8250/8250_core.c unsigned long start = UART_RSA_BASE << up->port.regshift; start 363 drivers/tty/serial/8250/8250_core.c start += port->iobase; start 364 drivers/tty/serial/8250/8250_core.c if (request_region(start, size, "serial-rsa")) start 406 drivers/tty/serial/8250/8250_dw.c p->mapbase = regs->start; start 419 drivers/tty/serial/8250/8250_dw.c p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); start 103 drivers/tty/serial/8250/8250_em.c up.port.mapbase = regs->start; start 104 drivers/tty/serial/8250/8250_em.c up.port.irq = irq->start; start 44 drivers/tty/serial/8250/8250_gsc.c (unsigned long long)dev->hpa.start); start 48 drivers/tty/serial/8250/8250_gsc.c address = dev->hpa.start; start 176 drivers/tty/serial/8250/8250_hp300.c uart.port.mapbase = (d->resource.start + UART_OFFSET); start 190 drivers/tty/serial/8250/8250_hp300.c out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); start 194 drivers/tty/serial/8250/8250_hp300.c out_8(d->resource.start + DIO_VIRADDRBASE + DCA_ID, 0xff); start 293 drivers/tty/serial/8250/8250_hp300.c if (d->resource.start) { start 295 drivers/tty/serial/8250/8250_hp300.c out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, 0); start 237 drivers/tty/serial/8250/8250_ingenic.c uart.port.mapbase = regs->start; start 241 drivers/tty/serial/8250/8250_ingenic.c uart.port.irq = irq->start; start 252 drivers/tty/serial/8250/8250_ingenic.c uart.port.membase = devm_ioremap(&pdev->dev, regs->start, start 120 drivers/tty/serial/8250/8250_lpc18xx.c 
uart.port.membase = devm_ioremap(&pdev->dev, res->start, start 164 drivers/tty/serial/8250/8250_lpc18xx.c uart.port.mapbase = res->start; start 118 drivers/tty/serial/8250/8250_men_mcb.c data[i].uart.port.mapbase = (unsigned long) mem->start start 495 drivers/tty/serial/8250/8250_mtk.c uart.port.membase = devm_ioremap(&pdev->dev, regs->start, start 512 drivers/tty/serial/8250/8250_mtk.c uart.port.mapbase = regs->start; start 513 drivers/tty/serial/8250/8250_mtk.c uart.port.irq = irq->start; start 102 drivers/tty/serial/8250/8250_of.c port->iobase = resource.start; start 104 drivers/tty/serial/8250/8250_of.c port->mapbase = resource.start; start 1150 drivers/tty/serial/8250/8250_omap.c membase = devm_ioremap_nocache(&pdev->dev, regs->start, start 1157 drivers/tty/serial/8250/8250_omap.c up.port.mapbase = regs->start; start 1159 drivers/tty/serial/8250/8250_omap.c up.port.irq = irq->start; start 729 drivers/tty/serial/8250/8250_pci.c device_window = ((region.start + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00) start 925 drivers/tty/serial/8250/8250_pci.c release_region(iobase->start, ITE_887x_IOSIZE); start 937 drivers/tty/serial/8250/8250_pci.c type = inb(iobase->start + 0x18) & 0x0f; start 986 drivers/tty/serial/8250/8250_pci.c release_region(iobase->start, ITE_887x_IOSIZE); start 122 drivers/tty/serial/8250/8250_pxa.c uart.port.mapbase = mmres->start; start 124 drivers/tty/serial/8250/8250_pxa.c uart.port.irq = irqres->start; start 174 drivers/tty/serial/8250/8250_uniphier.c membase = devm_ioremap(dev, regs->start, resource_size(regs)); start 211 drivers/tty/serial/8250/8250_uniphier.c up.port.mapbase = regs->start; start 383 drivers/tty/serial/8250/serial_cs.c port = p_dev->resource[1]->start; start 387 drivers/tty/serial/8250/serial_cs.c port = p_dev->resource[0]->start + 0x28; start 402 drivers/tty/serial/8250/serial_cs.c if (p_dev->resource[0]->start == 0) start 432 drivers/tty/serial/8250/serial_cs.c p_dev->resource[0]->start = base[j]; start 478 drivers/tty/serial/8250/serial_cs.c return setup_serial(link, info, link->resource[0]->start, link->irq); start 510 drivers/tty/serial/8250/serial_cs.c p_dev->resource[0]->start + 8 != p_dev->resource[1]->start) start 520 drivers/tty/serial/8250/serial_cs.c *base2 = p_dev->resource[0]->start + 8; start 531 drivers/tty/serial/8250/serial_cs.c base2 = link->resource[0]->start + 8; start 568 drivers/tty/serial/8250/serial_cs.c base2 = link->resource[0]->start; start 570 drivers/tty/serial/8250/serial_cs.c err = setup_serial(link, info, link->resource[0]->start, start 585 drivers/tty/serial/8250/serial_cs.c setup_serial(link, info, link->resource[0]->start, link->irq); start 437 drivers/tty/serial/altera_jtaguart.c port->mapbase = res_mem->start; start 445 drivers/tty/serial/altera_jtaguart.c port->irq = res_irq->start; start 576 drivers/tty/serial/altera_uart.c port->mapbase = res_mem->start; start 584 drivers/tty/serial/altera_uart.c port->irq = res_irq->start; start 716 drivers/tty/serial/amba-pl010.c base = devm_ioremap(&dev->dev, dev->res.start, start 726 drivers/tty/serial/amba-pl010.c uap->port.mapbase = dev->res.start; start 2583 drivers/tty/serial/amba-pl011.c uap->port.mapbase = mmiobase->start; start 683 drivers/tty/serial/ar933x_uart.c port->mapbase = mem_res->start; start 685 drivers/tty/serial/ar933x_uart.c port->irq = irq_res->start; start 2499 drivers/tty/serial/atmel_serial.c port->mapbase = mpdev->resource[0].start; start 2500 drivers/tty/serial/atmel_serial.c port->irq = mpdev->resource[1].start; start 837 
drivers/tty/serial/bcm63xx_uart.c port->mapbase = res_mem->start; start 854 drivers/tty/serial/bcm63xx_uart.c port->irq = res_irq->start; start 479 drivers/tty/serial/clps711x.c s->port.mapbase = res->start; start 62 drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c pram = ioremap(res.start, len); start 475 drivers/tty/serial/digicolor-usart.c dp->port.mapbase = res->start; start 747 drivers/tty/serial/efm32-uart.c efm_port->port.mapbase = res->start; start 855 drivers/tty/serial/fsl_linflexuart.c sport->mapbase = res->start; start 2392 drivers/tty/serial/fsl_lpuart.c sport->port.mapbase = res->start; start 2254 drivers/tty/serial/imx.c sport->port.mapbase = res->start; start 544 drivers/tty/serial/lantiq.c res = devm_request_mem_region(&pdev->dev, res->start, start 731 drivers/tty/serial/lantiq.c ltq_port->tx_irq = irqres[0].start; start 732 drivers/tty/serial/lantiq.c ltq_port->rx_irq = irqres[1].start; start 733 drivers/tty/serial/lantiq.c ltq_port->err_irq = irqres[2].start; start 734 drivers/tty/serial/lantiq.c port->irq = irqres[0].start; start 851 drivers/tty/serial/lantiq.c if (mmres->start == CPHYSADDR(LTQ_EARLY_ASC)) start 875 drivers/tty/serial/lantiq.c port->mapbase = mmres->start; start 657 drivers/tty/serial/lpc32xx_hs.c p->port.mapbase = res->start; start 750 drivers/tty/serial/men_z135_uart.c port->mapbase = mem->start; start 753 drivers/tty/serial/men_z135_uart.c port->membase = ioremap(mem->start, resource_size(mem)); start 841 drivers/tty/serial/men_z135_uart.c uart->port.mapbase = mem->start; start 702 drivers/tty/serial/meson_uart.c port->mapbase = res_mem->start; start 704 drivers/tty/serial/meson_uart.c port->irq = res_irq->start; start 528 drivers/tty/serial/milbeaut_usio.c port->membase = devm_ioremap(&pdev->dev, res->start, start 1655 drivers/tty/serial/mpc52xx_uart.c port->mapbase = res.start; start 1656 drivers/tty/serial/mpc52xx_uart.c port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc)); start 1785 drivers/tty/serial/mpc52xx_uart.c port->mapbase = res.start; start 565 drivers/tty/serial/mps2-uart.c mps_port->port.mapbase = res->start; start 1807 drivers/tty/serial/msm_serial.c port->mapbase = resource->start; start 93 drivers/tty/serial/mux.c status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32); start 459 drivers/tty/serial/mux.c request_mem_region(dev->hpa.start + MUX_OFFSET, start 475 drivers/tty/serial/mux.c port->mapbase = dev->hpa.start + MUX_OFFSET + start 509 drivers/tty/serial/mux.c if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET) start 523 drivers/tty/serial/mux.c release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET); start 850 drivers/tty/serial/mvebu-uart.c port->mapbase = reg->start; start 1685 drivers/tty/serial/mxs-auart.c s->port.mapbase = r->start; start 1686 drivers/tty/serial/mxs-auart.c s->port.membase = ioremap(r->start, resource_size(r)); start 1713 drivers/tty/serial/omap-serial.c up->port.mapbase = mem->start; start 693 drivers/tty/serial/owl-uart.c owl_port->port.mapbase = res_mem->start; start 860 drivers/tty/serial/pic32_uart.c port->mapbase = res_mem->start; start 1413 drivers/tty/serial/pmac_zilog.c uap->port.mapbase = r_ports.start; start 1431 drivers/tty/serial/pmac_zilog.c uap->tx_dma_regs = ioremap(r_txdma.start, 0x100); start 1436 drivers/tty/serial/pmac_zilog.c uap->rx_dma_regs = ioremap(r_rxdma.start, 0x100); start 1711 drivers/tty/serial/pmac_zilog.c uap->port.mapbase = r_ports->start; start 1712 drivers/tty/serial/pmac_zilog.c uap->port.membase = (unsigned char __iomem 
*) r_ports->start; start 800 drivers/tty/serial/pnx8xxx_uart.c if (pnx8xxx_ports[i].port.mapbase != res->start) start 875 drivers/tty/serial/pxa.c sport->port.mapbase = mmres->start; start 876 drivers/tty/serial/pxa.c sport->port.irq = irqres->start; start 895 drivers/tty/serial/pxa.c sport->port.membase = ioremap(mmres->start, resource_size(mmres)); start 1304 drivers/tty/serial/qcom_geni_serial.c uport->mapbase = res->start; start 761 drivers/tty/serial/rda-uart.c rda_port->port.mapbase = res_mem->start; start 896 drivers/tty/serial/sa1100.c if (sa1100_ports[i].port.mapbase != res->start) start 1778 drivers/tty/serial/samsung.c port->membase = devm_ioremap(port->dev, res->start, resource_size(res)); start 1784 drivers/tty/serial/samsung.c port->mapbase = res->start; start 998 drivers/tty/serial/sccnxp.c s->port[i].mapbase = res->start; start 1587 drivers/tty/serial/serial-tegra.c u->mapbase = resource->start; start 2480 drivers/tty/serial/serial_core.c .start = uart_start, start 2901 drivers/tty/serial/sh-sci.c port->mapbase = res->start; start 940 drivers/tty/serial/sifive.c ssp->port.mapbase = mem->start; start 1361 drivers/tty/serial/sirfsoc_uart.c port->mapbase = res->start; start 1363 drivers/tty/serial/sirfsoc_uart.c res->start, resource_size(res)); start 1375 drivers/tty/serial/sirfsoc_uart.c port->irq = res->start; start 1209 drivers/tty/serial/sprd_serial.c up->mapbase = res->start; start 738 drivers/tty/serial/st-asc.c port->mapbase = res->start; start 951 drivers/tty/serial/stm32-usart.c port->mapbase = res->start; start 976 drivers/tty/serial/sunsab.c up->port.mapbase = op->resource[0].start + offset; start 1463 drivers/tty/serial/sunsu.c up->port.mapbase = rp->start; start 1435 drivers/tty/serial/sunzilog.c up[0].port.mapbase = op->resource[0].start + 0x00; start 1452 drivers/tty/serial/sunzilog.c up[1].port.mapbase = op->resource[0].start + 0x04; start 444 drivers/tty/serial/timbuart.c uart->port.mapbase = iomem->start; start 851 drivers/tty/serial/uartlite.c ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata); start 1268 drivers/tty/serial/ucc_uart.c if (!res.start) { start 1273 drivers/tty/serial/ucc_uart.c qe_port->port.mapbase = res.start; start 1422 drivers/tty/serial/ucc_uart.c qe_port->us_info.regs = (phys_addr_t) res.start; start 714 drivers/tty/serial/vr41xx_siu.c port->mapbase = res->start; start 699 drivers/tty/serial/vt8500_serial.c vt8500_port->uart.mapbase = mmres->start; start 700 drivers/tty/serial/vt8500_serial.c vt8500_port->uart.irq = irqres->start; start 1542 drivers/tty/serial/xilinx_uartps.c port->mapbase = res->start; start 4282 drivers/tty/synclink.c .start = mgsl_start, start 1823 drivers/tty/synclink_gt.c unsigned int start, end; start 1832 drivers/tty/synclink_gt.c start = end = info->rbuf_current; start 1879 drivers/tty/synclink_gt.c if (end == start) start 3697 drivers/tty/synclink_gt.c .start = tx_release, start 4611 drivers/tty/synclink_gt.c unsigned int start, end; start 4628 drivers/tty/synclink_gt.c start = end = info->rbuf_current; start 4672 drivers/tty/synclink_gt.c free_rbufs(info, start, end); start 4694 drivers/tty/synclink_gt.c DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx"); start 4707 drivers/tty/synclink_gt.c int i = start; start 4735 drivers/tty/synclink_gt.c free_rbufs(info, start, end); start 936 drivers/tty/synclinkmp.c goto start; start 940 drivers/tty/synclinkmp.c goto start; start 971 drivers/tty/synclinkmp.c start: start 3883 drivers/tty/synclinkmp.c .start = tx_release, 
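Most of the serial-driver hits just above follow one probe idiom: the driver records the MMIO resource's .start as port->mapbase, remaps that same range to get port->membase, and takes the interrupt number from the IRQ resource's .start. The following is only a minimal sketch of that idiom; demo_uart_port and demo_uart_probe are made-up names for illustration, not code from any file listed here.

/*
 * Hypothetical demo_uart driver: illustrates the resource->start pattern
 * (mapbase = regs->start, membase = devm_ioremap(regs->start, ...),
 * irq = irqres->start) seen in the serial entries above.
 */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_uart_port {
	void __iomem	*membase;	/* virtual base returned by devm_ioremap() */
	resource_size_t	mapbase;	/* physical base, i.e. regs->start */
	int		irq;
};

static int demo_uart_probe(struct platform_device *pdev)
{
	struct demo_uart_port *up;
	struct resource *regs, *irqres;

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENODEV;
	up->mapbase = regs->start;		/* keep the physical address for bookkeeping */
	up->membase = devm_ioremap(&pdev->dev, regs->start,
				   resource_size(regs));
	if (!up->membase)
		return -ENOMEM;

	irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irqres)
		return -ENODEV;
	up->irq = irqres->start;		/* IRQ resources carry the number in .start */

	platform_set_drvdata(pdev, up);
	return 0;
}

The split matters because mapbase (physical) is only recorded for later reporting, while membase (virtual, from ioremap) is the pointer the driver actually reads and writes through.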
start 805 drivers/tty/tty_io.c if (tty->ops->start) start 806 drivers/tty/tty_io.c tty->ops->start(tty); start 242 drivers/tty/tty_ldisc.c .start = tty_ldiscs_seq_start, start 1608 drivers/tty/vt/keyboard.c .start = kbd_start, start 651 drivers/tty/vt/vt.c static void do_update_region(struct vc_data *vc, unsigned long start, int count) start 656 drivers/tty/vt/vt.c p = (u16 *) start; start 658 drivers/tty/vt/vt.c offset = (start - vc->vc_origin) / 2; start 663 drivers/tty/vt/vt.c start = vc->vc_sw->con_getxy(vc, start, &nxx, &nyy); start 689 drivers/tty/vt/vt.c p = (u16 *)start; start 690 drivers/tty/vt/vt.c start = vc->vc_sw->con_getxy(vc, start, NULL, NULL); start 695 drivers/tty/vt/vt.c void update_region(struct vc_data *vc, unsigned long start, int count) start 701 drivers/tty/vt/vt.c do_update_region(vc, start, count); start 1531 drivers/tty/vt/vt.c unsigned short * start; start 1540 drivers/tty/vt/vt.c start = (unsigned short *)vc->vc_pos; start 1546 drivers/tty/vt/vt.c start = (unsigned short *)vc->vc_origin; start 1554 drivers/tty/vt/vt.c start = (unsigned short *)vc->vc_origin; start 1559 drivers/tty/vt/vt.c scr_memsetw(start, vc->vc_video_erase_char, 2 * count); start 1561 drivers/tty/vt/vt.c do_update_region(vc, (unsigned long) start, count); start 1568 drivers/tty/vt/vt.c unsigned short *start = (unsigned short *)vc->vc_pos; start 1588 drivers/tty/vt/vt.c scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); start 1591 drivers/tty/vt/vt.c do_update_region(vc, (unsigned long)(start + offset), count); start 2951 drivers/tty/vt/vt.c const ushort *start; start 2978 drivers/tty/vt/vt.c start = (ushort *)vc->vc_pos; start 2985 drivers/tty/vt/vt.c vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); start 2989 drivers/tty/vt/vt.c start = (ushort *)vc->vc_pos; start 2996 drivers/tty/vt/vt.c start = (ushort *)vc->vc_pos; start 3013 drivers/tty/vt/vt.c vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); start 3438 drivers/tty/vt/vt.c .start = con_start, start 141 drivers/uio/uio.c return sprintf(buf, "0x%lx\n", port->start); start 168 drivers/uio/uio.c __ATTR(start, S_IRUGO, portio_start_show, NULL); start 85 drivers/uio/uio_aec.c info->port[0].start = pci_resource_start(pdev, 0); start 86 drivers/uio/uio_aec.c if (!info->port[0].start) start 225 drivers/uio/uio_dmem_genirq.c uiomem->addr = r->start; start 242 drivers/uio/uio_fsl_elbc_gpcm.c if ((reg_br & reg_or & BR_BA) != fsl_lbc_addr(res->start)) { start 354 drivers/uio/uio_fsl_elbc_gpcm.c != fsl_lbc_addr(res.start)) { start 377 drivers/uio/uio_fsl_elbc_gpcm.c reg_br_new |= fsl_lbc_addr(res.start) | BR_MS_GPCM | BR_V; start 382 drivers/uio/uio_fsl_elbc_gpcm.c info->mem[0].internal_addr = ioremap(res.start, resource_size(&res)); start 391 drivers/uio/uio_fsl_elbc_gpcm.c info->mem[0].addr = res.start; start 423 drivers/uio/uio_fsl_elbc_gpcm.c priv->name, (unsigned long long)res.start, priv->bank, start 119 drivers/uio/uio_mf624.c resource_size_t start = pci_resource_start(dev, bar); start 123 drivers/uio/uio_mf624.c mem->addr = start & PAGE_MASK; start 124 drivers/uio/uio_mf624.c mem->offs = start & ~PAGE_MASK; start 127 drivers/uio/uio_mf624.c mem->size = ((start & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK; start 181 drivers/uio/uio_pdrv_genirq.c uiomem->addr = r->start; start 159 drivers/uio/uio_pruss.c if (!regs_prussio->start) { start 186 drivers/uio/uio_pruss.c gdev->prussio_vaddr = ioremap(regs_prussio->start, len); start 197 drivers/uio/uio_pruss.c p->mem[0].addr = regs_prussio->start; start 128 
drivers/usb/c67x00/c67x00-drv.c if (!request_mem_region(res->start, resource_size(res), start 134 drivers/usb/c67x00/c67x00-drv.c c67x00->hpi.base = ioremap(res->start, resource_size(res)); start 149 drivers/usb/c67x00/c67x00-drv.c ret = request_irq(res2->start, c67x00_irq, 0, pdev->name, c67x00); start 169 drivers/usb/c67x00/c67x00-drv.c free_irq(res2->start, c67x00); start 173 drivers/usb/c67x00/c67x00-drv.c release_mem_region(res->start, resource_size(res)); start 193 drivers/usb/c67x00/c67x00-drv.c free_irq(res->start, c67x00); start 199 drivers/usb/c67x00/c67x00-drv.c release_mem_region(res->start, resource_size(res)); start 303 drivers/usb/c67x00/c67x00-hcd.c .start = c67x00_hcd_start, start 110 drivers/usb/cdns3/cdns3-pci-wrap.c res[RES_DEV_ID].start = pci_resource_start(pdev, PCI_BAR_DEV); start 115 drivers/usb/cdns3/cdns3-pci-wrap.c &res[RES_DEV_ID].start); start 117 drivers/usb/cdns3/cdns3-pci-wrap.c res[RES_HOST_ID].start = pci_resource_start(pdev, PCI_BAR_HOST); start 122 drivers/usb/cdns3/cdns3-pci-wrap.c &res[RES_HOST_ID].start); start 125 drivers/usb/cdns3/cdns3-pci-wrap.c wrap->dev_res[RES_IRQ_HOST_ID].start = pdev->irq; start 130 drivers/usb/cdns3/cdns3-pci-wrap.c wrap->dev_res[RES_IRQ_PERIPHERAL_ID].start = pdev->irq; start 134 drivers/usb/cdns3/cdns3-pci-wrap.c res[RES_DRD_ID].start = pci_resource_start(pdev, PCI_BAR_OTG); start 139 drivers/usb/cdns3/cdns3-pci-wrap.c &res[RES_DRD_ID].start); start 142 drivers/usb/cdns3/cdns3-pci-wrap.c wrap->dev_res[RES_IRQ_OTG_ID].start = pdev->irq; start 55 drivers/usb/cdns3/core.c ret = cdns->roles[role]->start(cdns); start 273 drivers/usb/cdns3/core.c rdrv->start = cdns3_idle_role_start; start 30 drivers/usb/cdns3/core.h int (*start)(struct cdns3 *cdns); start 2774 drivers/usb/cdns3/gadget.c rdrv->start = __cdns3_gadget_init; start 67 drivers/usb/cdns3/host.c rdrv->start = __cdns3_host_init; start 132 drivers/usb/chipidea/ci.h int (*start)(struct ci_hdrc *); start 277 drivers/usb/chipidea/ci.h ret = ci->roles[role]->start(ci); start 89 drivers/usb/chipidea/ci_hdrc_pci.c res[0].start = pci_resource_start(pdev, 0); start 92 drivers/usb/chipidea/ci_hdrc_pci.c res[1].start = pdev->irq; start 1072 drivers/usb/chipidea/core.c ci->hw_bank.phys = res->start; start 363 drivers/usb/chipidea/host.c rdrv->start = host_start; start 2050 drivers/usb/chipidea/udc.c rdrv->start = udc_id_switch_for_device; start 161 drivers/usb/core/devices.c static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, start 167 drivers/usb/core/devices.c if (start > end) start 168 drivers/usb/core/devices.c return start; start 204 drivers/usb/core/devices.c return start; start 215 drivers/usb/core/devices.c start += sprintf(start, format_endpt, desc->bEndpointAddress, dir, start 220 drivers/usb/core/devices.c return start; start 223 drivers/usb/core/devices.c static char *usb_dump_interface_descriptor(char *start, char *end, start 232 drivers/usb/core/devices.c if (start > end) start 233 drivers/usb/core/devices.c return start; start 241 drivers/usb/core/devices.c start += sprintf(start, format_iface, start 251 drivers/usb/core/devices.c return start; start 254 drivers/usb/core/devices.c static char *usb_dump_interface(int speed, char *start, char *end, start 261 drivers/usb/core/devices.c start = usb_dump_interface_descriptor(start, end, intfc, iface, setno); start 263 drivers/usb/core/devices.c if (start > end) start 264 drivers/usb/core/devices.c return start; start 265 drivers/usb/core/devices.c start = usb_dump_endpoint_descriptor(speed, start 
266 drivers/usb/core/devices.c start, end, &desc->endpoint[i].desc); start 268 drivers/usb/core/devices.c return start; start 271 drivers/usb/core/devices.c static char *usb_dump_iad_descriptor(char *start, char *end, start 274 drivers/usb/core/devices.c if (start > end) start 275 drivers/usb/core/devices.c return start; start 276 drivers/usb/core/devices.c start += sprintf(start, format_iad, start 283 drivers/usb/core/devices.c return start; start 291 drivers/usb/core/devices.c static char *usb_dump_config_descriptor(char *start, char *end, start 297 drivers/usb/core/devices.c if (start > end) start 298 drivers/usb/core/devices.c return start; start 303 drivers/usb/core/devices.c start += sprintf(start, format_config, start 310 drivers/usb/core/devices.c return start; start 313 drivers/usb/core/devices.c static char *usb_dump_config(int speed, char *start, char *end, start 320 drivers/usb/core/devices.c if (start > end) start 321 drivers/usb/core/devices.c return start; start 324 drivers/usb/core/devices.c return start + sprintf(start, "(null Cfg. desc.)\n"); start 325 drivers/usb/core/devices.c start = usb_dump_config_descriptor(start, end, &config->desc, active, start 330 drivers/usb/core/devices.c start = usb_dump_iad_descriptor(start, end, start 337 drivers/usb/core/devices.c if (start > end) start 338 drivers/usb/core/devices.c return start; start 339 drivers/usb/core/devices.c start = usb_dump_interface(speed, start 340 drivers/usb/core/devices.c start, end, intfc, interface, j); start 343 drivers/usb/core/devices.c return start; start 349 drivers/usb/core/devices.c static char *usb_dump_device_descriptor(char *start, char *end, start 355 drivers/usb/core/devices.c if (start > end) start 356 drivers/usb/core/devices.c return start; start 357 drivers/usb/core/devices.c start += sprintf(start, format_device1, start 365 drivers/usb/core/devices.c if (start > end) start 366 drivers/usb/core/devices.c return start; start 367 drivers/usb/core/devices.c start += sprintf(start, format_device2, start 371 drivers/usb/core/devices.c return start; start 377 drivers/usb/core/devices.c static char *usb_dump_device_strings(char *start, char *end, start 380 drivers/usb/core/devices.c if (start > end) start 381 drivers/usb/core/devices.c return start; start 383 drivers/usb/core/devices.c start += sprintf(start, format_string_manufacturer, start 385 drivers/usb/core/devices.c if (start > end) start 388 drivers/usb/core/devices.c start += sprintf(start, format_string_product, dev->product); start 389 drivers/usb/core/devices.c if (start > end) start 393 drivers/usb/core/devices.c start += sprintf(start, format_string_serialnumber, start 397 drivers/usb/core/devices.c return start; start 400 drivers/usb/core/devices.c static char *usb_dump_desc(char *start, char *end, struct usb_device *dev) start 404 drivers/usb/core/devices.c if (start > end) start 405 drivers/usb/core/devices.c return start; start 407 drivers/usb/core/devices.c start = usb_dump_device_descriptor(start, end, &dev->descriptor); start 409 drivers/usb/core/devices.c if (start > end) start 410 drivers/usb/core/devices.c return start; start 412 drivers/usb/core/devices.c start = usb_dump_device_strings(start, end, dev); start 415 drivers/usb/core/devices.c if (start > end) start 416 drivers/usb/core/devices.c return start; start 417 drivers/usb/core/devices.c start = usb_dump_config(dev->speed, start 418 drivers/usb/core/devices.c start, end, dev->config + i, start 422 drivers/usb/core/devices.c return start; start 428 
drivers/usb/core/devices.c static char *usb_dump_hub_descriptor(char *start, char *end, start 434 drivers/usb/core/devices.c if (start > end) start 435 drivers/usb/core/devices.c return start; start 436 drivers/usb/core/devices.c start += sprintf(start, "Interface:"); start 437 drivers/usb/core/devices.c while (leng && start <= end) { start 438 drivers/usb/core/devices.c start += sprintf(start, " %02x", *ptr); start 441 drivers/usb/core/devices.c *start++ = '\n'; start 442 drivers/usb/core/devices.c return start; start 445 drivers/usb/core/devices.c static char *usb_dump_string(char *start, char *end, start 448 drivers/usb/core/devices.c if (start > end) start 449 drivers/usb/core/devices.c return start; start 450 drivers/usb/core/devices.c start += sprintf(start, "Interface:"); start 453 drivers/usb/core/devices.c start += sprintf(start, "%s: %.100s ", id, start 455 drivers/usb/core/devices.c return start; start 2789 drivers/usb/core/hcd.c retval = hcd->driver->start(hcd); start 388 drivers/usb/dwc2/core.c ktime_t start; start 395 drivers/usb/dwc2/core.c start = ktime_get(); start 407 drivers/usb/dwc2/core.c ms = ktime_to_ms(ktime_sub(end, start)); start 4907 drivers/usb/dwc2/hcd.c .start = _dwc2_hcd_start, start 5075 drivers/usb/dwc2/hcd.c hcd->rsrc_start = res->start; start 234 drivers/usb/dwc2/hcd_queue.c int interval, int start, bool only_one_period) start 251 drivers/usb/dwc2/hcd_queue.c if (start >= interval_bits) start 256 drivers/usb/dwc2/hcd_queue.c first_end = (start / bits_per_period + 1) * bits_per_period; start 267 drivers/usb/dwc2/hcd_queue.c while (start + num_bits <= first_end) { start 271 drivers/usb/dwc2/hcd_queue.c end = (start / bits_per_period + 1) * bits_per_period; start 274 drivers/usb/dwc2/hcd_queue.c start = bitmap_find_next_zero_area(map, end, start, num_bits, start 282 drivers/usb/dwc2/hcd_queue.c if (start >= end) { start 283 drivers/usb/dwc2/hcd_queue.c start = end; start 289 drivers/usb/dwc2/hcd_queue.c int ith_start = start + interval_bits * i; start 307 drivers/usb/dwc2/hcd_queue.c start = end; start 309 drivers/usb/dwc2/hcd_queue.c start = ith_start - interval_bits * i; start 318 drivers/usb/dwc2/hcd_queue.c if (start + num_bits > first_end) start 322 drivers/usb/dwc2/hcd_queue.c int ith_start = start + interval_bits * i; start 327 drivers/usb/dwc2/hcd_queue.c return start; start 342 drivers/usb/dwc2/hcd_queue.c int interval, int start) start 355 drivers/usb/dwc2/hcd_queue.c int ith_start = start + interval_bits * i; start 454 drivers/usb/dwc2/hcd_queue.c int start = 0; start 465 drivers/usb/dwc2/hcd_queue.c start = i - period_start; start 481 drivers/usb/dwc2/hcd_queue.c cat_printf(&buf, &buf_size, "%d %s -%3d %s", start, start 482 drivers/usb/dwc2/hcd_queue.c units, start + count - 1, units); start 129 drivers/usb/dwc2/pci.c res[0].start = pci_resource_start(pci, 0); start 134 drivers/usb/dwc2/pci.c res[1].start = pci->irq; start 401 drivers/usb/dwc2/platform.c (unsigned long)res->start, hsotg->regs); start 1425 drivers/usb/dwc3/core.c dwc->xhci_resources[0].start = res->start; start 1426 drivers/usb/dwc3/core.c dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + start 1436 drivers/usb/dwc3/core.c dwc_res.start += DWC3_GLOBALS_REGS_START; start 62 drivers/usb/dwc3/dwc3-haps.c res[0].start = pci_resource_start(pci, 0); start 67 drivers/usb/dwc3/dwc3-haps.c res[1].start = pci->irq; start 243 drivers/usb/dwc3/dwc3-pci.c res[0].start = pci_resource_start(pci, 0); start 248 drivers/usb/dwc3/dwc3-pci.c res[1].start = pci->irq; start 485 
drivers/usb/dwc3/dwc3-qcom.c child_res[0].start = res->start; start 486 drivers/usb/dwc3/dwc3-qcom.c child_res[0].end = child_res[0].start + start 491 drivers/usb/dwc3/dwc3-qcom.c child_res[1].start = child_res[1].end = irq; start 613 drivers/usb/dwc3/dwc3-qcom.c parent_res->start = res->start + start 615 drivers/usb/dwc3/dwc3-qcom.c parent_res->end = parent_res->start + start 227 drivers/usb/dwc3/dwc3-st.c dwc3_data->syscfg_reg_off = res->start; start 66 drivers/usb/dwc3/host.c dwc->xhci_resources[1].start = irq; start 123 drivers/usb/gadget/composite.c #define for_each_ep_desc(start, ep_desc) \ start 124 drivers/usb/gadget/composite.c for (ep_desc = next_ep_desc(start); \ start 1300 drivers/usb/gadget/function/f_mass_storage.c int loej, start; start 1314 drivers/usb/gadget/function/f_mass_storage.c start = common->cmnd[4] & 0x01; start 1320 drivers/usb/gadget/function/f_mass_storage.c if (start) { start 661 drivers/usb/gadget/function/f_printer.c printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync) start 67 drivers/usb/gadget/udc/bdc/bdc_pci.c res[0].start = pci_resource_start(pci, 0); start 72 drivers/usb/gadget/udc/bdc/bdc_pci.c res[1].start = pci->irq; start 2601 drivers/usb/gadget/udc/dummy_hcd.c .start = dummy_start, start 1110 drivers/usb/gadget/udc/fotg210-udc.c fotg210->reg = ioremap(res->start, resource_size(res)); start 1167 drivers/usb/gadget/udc/fotg210-udc.c ret = request_irq(ires->start, fotg210_irq, IRQF_SHARED, start 1183 drivers/usb/gadget/udc/fotg210-udc.c free_irq(ires->start, fotg210); start 90 drivers/usb/gadget/udc/fsl_mxc_udc.c void __iomem *phy_regs = ioremap(res->start + start 2397 drivers/usb/gadget/udc/fsl_udc_core.c if (!request_mem_region(res->start, resource_size(res), start 2405 drivers/usb/gadget/udc/fsl_udc_core.c dr_regs = ioremap(res->start, resource_size(res)); start 2543 drivers/usb/gadget/udc/fsl_udc_core.c release_mem_region(res->start, resource_size(res)); start 2580 drivers/usb/gadget/udc/fsl_udc_core.c release_mem_region(res->start, resource_size(res)); start 521 drivers/usb/gadget/udc/fsl_usb2_udc.h unsigned int start, num, i; start 527 drivers/usb/gadget/udc/fsl_usb2_udc.h start = 0; start 538 drivers/usb/gadget/udc/fsl_usb2_udc.h printk(KERN_DEBUG "%6x: %s\n", start, line); start 540 drivers/usb/gadget/udc/fsl_usb2_udc.h start += num; start 1391 drivers/usb/gadget/udc/fusb300_udc.c reg = ioremap(res->start, resource_size(res)); start 1424 drivers/usb/gadget/udc/fusb300_udc.c ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED, start 1431 drivers/usb/gadget/udc/fusb300_udc.c ret = request_irq(ires1->start, fusb300_irq, start 1490 drivers/usb/gadget/udc/fusb300_udc.c free_irq(ires->start, fusb300); start 536 drivers/usb/gadget/udc/goku_udc.c u32 start = req->req.dma; start 537 drivers/usb/gadget/udc/goku_udc.c u32 end = start + req->req.length - 1; start 549 drivers/usb/gadget/udc/goku_udc.c writel(start, &regs->in_dma_start); start 574 drivers/usb/gadget/udc/goku_udc.c writel(start, &regs->out_dma_start); start 486 drivers/usb/gadget/udc/gr_udc.c dma_addr_t start = req->req.dma + bytes_used; start 495 drivers/usb/gadget/udc/gr_udc.c ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); start 539 drivers/usb/gadget/udc/gr_udc.c dma_addr_t start = req->req.dma + bytes_used; start 542 drivers/usb/gadget/udc/gr_udc.c ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); start 1562 drivers/usb/gadget/udc/m66592-udc.c reg = ioremap(res->start, resource_size(res)); start 1595 drivers/usb/gadget/udc/m66592-udc.c ret =
request_irq(ires->start, m66592_irq, IRQF_SHARED, start 1677 drivers/usb/gadget/udc/m66592-udc.c free_irq(ires->start, m66592); start 1823 drivers/usb/gadget/udc/mv_u3d_core.c ioremap(r->start, resource_size(r)); start 1830 drivers/usb/gadget/udc/mv_u3d_core.c (unsigned long) r->start, start 1924 drivers/usb/gadget/udc/mv_u3d_core.c u3d->irq = r->start; start 2157 drivers/usb/gadget/udc/mv_udc_core.c devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 2169 drivers/usb/gadget/udc/mv_udc_core.c udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 2248 drivers/usb/gadget/udc/mv_udc_core.c udc->irq = r->start; start 2604 drivers/usb/gadget/udc/net2272.c dev = net2272_probe_init(&pdev->dev, irq_res->start); start 2618 drivers/usb/gadget/udc/net2272.c base = iomem->start; start 2621 drivers/usb/gadget/udc/net2272.c dev->base_shift = iomem_bus->start; start 2662 drivers/usb/gadget/udc/net2272.c release_mem_region(pdev->resource[0].start, start 471 drivers/usb/gadget/udc/omap_udc.c static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start) start 485 drivers/usb/gadget/udc/omap_udc.c end |= start & (0xffff << 16); start 486 drivers/usb/gadget/udc/omap_udc.c if (end < start) start 488 drivers/usb/gadget/udc/omap_udc.c return end - start; start 491 drivers/usb/gadget/udc/omap_udc.c static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) start 499 drivers/usb/gadget/udc/omap_udc.c end |= start & (0xffff << 16); start 502 drivers/usb/gadget/udc/omap_udc.c if (end < start) start 504 drivers/usb/gadget/udc/omap_udc.c return end - start; start 2759 drivers/usb/gadget/udc/omap_udc.c if (!request_mem_region(pdev->resource[0].start, start 2760 drivers/usb/gadget/udc/omap_udc.c pdev->resource[0].end - pdev->resource[0].start + 1, start 2885 drivers/usb/gadget/udc/omap_udc.c status = devm_request_irq(&pdev->dev, pdev->resource[1].start, start 2889 drivers/usb/gadget/udc/omap_udc.c (int) pdev->resource[1].start, status); start 2894 drivers/usb/gadget/udc/omap_udc.c status = devm_request_irq(&pdev->dev, pdev->resource[2].start, start 2898 drivers/usb/gadget/udc/omap_udc.c (int) pdev->resource[2].start, status); start 2902 drivers/usb/gadget/udc/omap_udc.c status = devm_request_irq(&pdev->dev, pdev->resource[3].start, start 2906 drivers/usb/gadget/udc/omap_udc.c (int) pdev->resource[3].start, status); start 2936 drivers/usb/gadget/udc/omap_udc.c release_mem_region(pdev->resource[0].start, start 2937 drivers/usb/gadget/udc/omap_udc.c pdev->resource[0].end - pdev->resource[0].start + 1); start 2952 drivers/usb/gadget/udc/omap_udc.c release_mem_region(pdev->resource[0].start, start 2953 drivers/usb/gadget/udc/omap_udc.c pdev->resource[0].end - pdev->resource[0].start + 1); start 1855 drivers/usb/gadget/udc/r8a66597-udc.c irq = ires->start; start 133 drivers/usb/gadget/udc/snps_udc_plat.c udc->phys_addr = (unsigned long)res->start; start 2130 drivers/usb/gadget/udc/udc-xilinx.c driver_name, (u32)res->start, udc->addr, start 311 drivers/usb/host/bcma-hcd.c hci_res[0].start = addr; start 312 drivers/usb/host/bcma-hcd.c hci_res[0].end = hci_res[0].start + 0x1000 - 1; start 315 drivers/usb/host/bcma-hcd.c hci_res[1].start = dev->irq; start 129 drivers/usb/host/ehci-atmel.c hcd->rsrc_start = res->start; start 202 drivers/usb/host/ehci-exynos.c hcd->rsrc_start = res->start; start 86 drivers/usb/host/ehci-fsl.c irq = res->start; start 102 drivers/usb/host/ehci-fsl.c hcd->rsrc_start = res->start; start 39 drivers/usb/host/ehci-grlib.c .start = ehci_run, start 98 
drivers/usb/host/ehci-grlib.c hcd->rsrc_start = res.start; start 1202 drivers/usb/host/ehci-hcd.c .start = ehci_run, start 155 drivers/usb/host/ehci-mv.c hcd->rsrc_start = r->start; start 64 drivers/usb/host/ehci-mxc.c hcd->rsrc_start = res->start; start 141 drivers/usb/host/ehci-npcm7xx.c hcd->rsrc_start = res->start; start 143 drivers/usb/host/ehci-omap.c hcd->rsrc_start = res->start; start 253 drivers/usb/host/ehci-orion.c hcd->rsrc_start = res->start; start 239 drivers/usb/host/ehci-platform.c hcd->rsrc_start = res_mem->start; start 107 drivers/usb/host/ehci-pmcmsp.c if (!request_mem_region(res->start, res_len, "mab regs")) start 110 drivers/usb/host/ehci-pmcmsp.c dev->mab_regs = ioremap_nocache(res->start, res_len); start 123 drivers/usb/host/ehci-pmcmsp.c if (!request_mem_region(res->start, res_len, "usbid regs")) { start 127 drivers/usb/host/ehci-pmcmsp.c dev->usbid_regs = ioremap_nocache(res->start, res_len); start 137 drivers/usb/host/ehci-pmcmsp.c release_mem_region(res->start, res_len); start 143 drivers/usb/host/ehci-pmcmsp.c release_mem_region(res->start, res_len); start 175 drivers/usb/host/ehci-pmcmsp.c hcd->rsrc_start = res->start; start 205 drivers/usb/host/ehci-pmcmsp.c retval = usb_add_hcd(hcd, res->start, IRQF_SHARED); start 260 drivers/usb/host/ehci-pmcmsp.c .start = ehci_run, start 40 drivers/usb/host/ehci-ppc-of.c .start = ehci_run, start 118 drivers/usb/host/ehci-ppc-of.c hcd->rsrc_start = res.start; start 142 drivers/usb/host/ehci-ppc-of.c res.start + OHCI_HCCTRL_OFFSET, start 207 drivers/usb/host/ehci-ppc-of.c if (!request_mem_region(res.start, start 211 drivers/usb/host/ehci-ppc-of.c release_mem_region(res.start, 0x4); start 64 drivers/usb/host/ehci-ps3.c .start = ehci_run, start 1486 drivers/usb/host/ehci-sched.c u32 now, base, next, start, period, span, now2; start 1514 drivers/usb/host/ehci-sched.c start = ((-(++ehci->random_frame)) << 3) & (period - 1); start 1521 drivers/usb/host/ehci-sched.c next = start; start 1522 drivers/usb/host/ehci-sched.c start += period; start 1524 drivers/usb/host/ehci-sched.c start--; start 1527 drivers/usb/host/ehci-sched.c if (itd_slot_ok(ehci, stream, start)) start 1530 drivers/usb/host/ehci-sched.c if ((start % 8) >= 6) start 1532 drivers/usb/host/ehci-sched.c if (sitd_slot_ok(ehci, stream, start, start 1536 drivers/usb/host/ehci-sched.c } while (start > next && !done); start 1544 drivers/usb/host/ehci-sched.c stream->ps.phase = (start >> 3) & start 1548 drivers/usb/host/ehci-sched.c stream->ps.phase_uf = start & 7; start 1554 drivers/usb/host/ehci-sched.c start = (stream->ps.phase << 3) + stream->ps.phase_uf; start 1557 drivers/usb/host/ehci-sched.c stream->next_uframe = start; start 1579 drivers/usb/host/ehci-sched.c start = (stream->next_uframe - base) & (mod - 1); start 1593 drivers/usb/host/ehci-sched.c if (unlikely(!empty && start < period)) { start 1601 drivers/usb/host/ehci-sched.c if (likely(!empty || start <= now2 + period)) { start 1604 drivers/usb/host/ehci-sched.c if (unlikely(start < next && start 1609 drivers/usb/host/ehci-sched.c if (likely(start >= now2)) start 1621 drivers/usb/host/ehci-sched.c skip = (now2 - start + period - 1) & -period; start 1624 drivers/usb/host/ehci-sched.c urb, start + base, span - period, now2 + base, start 1645 drivers/usb/host/ehci-sched.c start = next + ((start - next) & (period - 1)); start 1649 drivers/usb/host/ehci-sched.c if (unlikely(start + span - period >= mod + wrap)) { start 1651 drivers/usb/host/ehci-sched.c urb, start, span - period, mod + wrap); start 1656 
drivers/usb/host/ehci-sched.c start += base; start 1657 drivers/usb/host/ehci-sched.c stream->next_uframe = (start + skip) & (mod - 1); start 1660 drivers/usb/host/ehci-sched.c urb->start_frame = start & (mod - 1); start 42 drivers/usb/host/ehci-sh.c .start = ehci_run, start 108 drivers/usb/host/ehci-sh.c hcd->rsrc_start = res->start; start 104 drivers/usb/host/ehci-spear.c hcd->rsrc_start = res->start; start 227 drivers/usb/host/ehci-st.c hcd->rsrc_start = res_mem->start; start 463 drivers/usb/host/ehci-tegra.c hcd->rsrc_start = res->start; start 75 drivers/usb/host/ehci-xilinx-of.c .start = ehci_run, start 140 drivers/usb/host/ehci-xilinx-of.c hcd->rsrc_start = res.start; start 544 drivers/usb/host/fhci-hcd.c .start = fhci_start, start 604 drivers/usb/host/fhci-hcd.c hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs)); start 2758 drivers/usb/host/fotg210-hcd.c qh->start = NO_FRAME; start 3492 drivers/usb/host/fotg210-hcd.c (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, start 3499 drivers/usb/host/fotg210-hcd.c for (i = qh->start; i < fotg210->periodic_size; i += period) { start 3576 drivers/usb/host/fotg210-hcd.c for (i = qh->start; i < fotg210->periodic_size; i += period) start 3587 drivers/usb/host/fotg210-hcd.c (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, start 3768 drivers/usb/host/fotg210-hcd.c frame = qh->start; start 3808 drivers/usb/host/fotg210-hcd.c qh->start = frame; start 4168 drivers/usb/host/fotg210-hcd.c u32 now, next, start, period, span; start 4208 drivers/usb/host/fotg210-hcd.c start = next + excess - mod + period * start 4211 drivers/usb/host/fotg210-hcd.c start = next + excess + period; start 4212 drivers/usb/host/fotg210-hcd.c if (start - now >= mod) { start 4214 drivers/usb/host/fotg210-hcd.c urb, start - now - period, period, start 4230 drivers/usb/host/fotg210-hcd.c start = SCHEDULE_SLOP + (now & ~0x07); start 4239 drivers/usb/host/fotg210-hcd.c next = start; start 4240 drivers/usb/host/fotg210-hcd.c start += period; start 4242 drivers/usb/host/fotg210-hcd.c start--; start 4244 drivers/usb/host/fotg210-hcd.c if (itd_slot_ok(fotg210, mod, start, start 4247 drivers/usb/host/fotg210-hcd.c } while (start > next && !done); start 4259 drivers/usb/host/fotg210-hcd.c if (unlikely(start - now + span - period >= start 4262 drivers/usb/host/fotg210-hcd.c urb, start - now, span - period, start 4268 drivers/usb/host/fotg210-hcd.c stream->next_uframe = start & (mod - 1); start 5517 drivers/usb/host/fotg210-hcd.c .start = fotg210_run, start 5589 drivers/usb/host/fotg210-hcd.c irq = res->start; start 5608 drivers/usb/host/fotg210-hcd.c hcd->rsrc_start = res->start; start 466 drivers/usb/host/fotg210.h unsigned short start; /* where polling starts */ start 1778 drivers/usb/host/imx21-hcd.c .start = imx21_hc_start, start 1817 drivers/usb/host/imx21-hcd.c release_mem_region(res->start, resource_size(res)); start 1863 drivers/usb/host/imx21-hcd.c res = request_mem_region(res->start, resource_size(res), hcd_name); start 1869 drivers/usb/host/imx21-hcd.c imx21->regs = ioremap(res->start, resource_size(res)); start 1911 drivers/usb/host/imx21-hcd.c release_mem_region(res->start, resource_size(res)); start 1513 drivers/usb/host/isp116x-hcd.c .start = isp116x_start, start 1544 drivers/usb/host/isp116x-hcd.c release_mem_region(res->start, 2); start 1547 drivers/usb/host/isp116x-hcd.c release_mem_region(res->start, 2); start 1581 drivers/usb/host/isp116x-hcd.c irq = ires->start; start 1584 drivers/usb/host/isp116x-hcd.c if (!request_mem_region(addr->start, 2, hcd_name)) { 
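Likewise, the ehci-*/ohci-* platform glue above keeps repeating one host-controller setup: fill hcd->rsrc_start and hcd->rsrc_len from the memory resource, map the registers, and hand the IRQ to usb_add_hcd(); the ".start = ehci_run"-style entries name the hc_driver op that usb_add_hcd() later invokes (see the drivers/usb/core/hcd.c hit earlier). A rough sketch under the same caveat: demo_hc_driver, demo_hcd_start and demo_hcd_probe are placeholders, not any driver listed here.

/*
 * Placeholder HCD glue: shows why the drivers above assign
 * hcd->rsrc_start = res->start before mapping and adding the HCD.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static int demo_hcd_start(struct usb_hcd *hcd)
{
	/* the op behind the ".start = ..." hits above; real drivers start the HC here */
	hcd->state = HC_STATE_RUNNING;
	return 0;
}

static const struct hc_driver demo_hc_driver = {
	.description	= "demo-hcd",
	.product_desc	= "Demo host controller",
	.hcd_priv_size	= 0,
	.flags		= HCD_MEMORY | HCD_USB2,
	.start		= demo_hcd_start,
	/* a real driver also supplies .irq, .urb_enqueue, .urb_dequeue, ... */
};

static int demo_hcd_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hcd = usb_create_hcd(&demo_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto err_put;
	}
	hcd->rsrc_start = res->start;		/* physical base of the register window */
	hcd->rsrc_len = resource_size(res);	/* i.e. end - start + 1 */

	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hcd->regs)) {
		ret = PTR_ERR(hcd->regs);
		goto err_put;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto err_put;
	return 0;

err_put:
	usb_put_hcd(hcd);
	return ret;
}

rsrc_start/rsrc_len only describe the register window (the USB core logs them when the HCD is added); the actual register access goes through the ioremapped hcd->regs.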
start 1588 drivers/usb/host/isp116x-hcd.c addr_reg = ioremap(addr->start, resource_size(addr)); start 1593 drivers/usb/host/isp116x-hcd.c if (!request_mem_region(data->start, 2, hcd_name)) { start 1597 drivers/usb/host/isp116x-hcd.c data_reg = ioremap(data->start, resource_size(data)); start 1610 drivers/usb/host/isp116x-hcd.c hcd->rsrc_start = addr->start; start 1646 drivers/usb/host/isp116x-hcd.c release_mem_region(data->start, 2); start 1650 drivers/usb/host/isp116x-hcd.c release_mem_region(addr->start, 2); start 2594 drivers/usb/host/isp1362-hcd.c .start = isp1362_hc_start, start 2652 drivers/usb/host/isp1362-hcd.c irq = irq_res->start; start 2669 drivers/usb/host/isp1362-hcd.c hcd->rsrc_start = data->start; start 1809 drivers/usb/host/max3421-hcd.c .start = max3421_start, start 194 drivers/usb/host/ohci-at91.c hcd->rsrc_start = res->start; start 444 drivers/usb/host/ohci-da8xx.c hcd->rsrc_start = mem->start; start 170 drivers/usb/host/ohci-exynos.c hcd->rsrc_start = res->start; start 1196 drivers/usb/host/ohci-hcd.c .start = ohci_start, start 210 drivers/usb/host/ohci-nxp.c hcd->rsrc_start = res->start; start 342 drivers/usb/host/ohci-omap.c hcd->rsrc_start = pdev->resource[0].start; start 343 drivers/usb/host/ohci-omap.c hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; start 210 drivers/usb/host/ohci-platform.c hcd->rsrc_start = res_mem->start; start 58 drivers/usb/host/ohci-ppc-of.c .start = ohci_ppc_of_start, start 116 drivers/usb/host/ohci-ppc-of.c hcd->rsrc_start = res.start; start 164 drivers/usb/host/ohci-ppc-of.c if (!request_mem_region(res.start, 0x4, hcd_name)) { start 169 drivers/usb/host/ohci-ppc-of.c release_mem_region(res.start, 0x4); start 51 drivers/usb/host/ohci-ps3.c .start = ps3_ohci_hc_start, start 460 drivers/usb/host/ohci-pxa27x.c hcd->rsrc_start = r->start; start 368 drivers/usb/host/ohci-s3c2410.c hcd->rsrc_start = dev->resource[0].start; start 393 drivers/usb/host/ohci-s3c2410.c retval = usb_add_hcd(hcd, dev->resource[1].start, 0); start 93 drivers/usb/host/ohci-sa1111.c .start = ohci_sa1111_start, start 197 drivers/usb/host/ohci-sa1111.c hcd->rsrc_start = dev->res.start; start 58 drivers/usb/host/ohci-sm501.c .start = ohci_sm501_start, start 107 drivers/usb/host/ohci-sm501.c if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) { start 127 drivers/usb/host/ohci-sm501.c hcd->rsrc_start = res->start; start 160 drivers/usb/host/ohci-sm501.c if (usb_hcd_setup_local_mem(hcd, mem->start, start 161 drivers/usb/host/ohci-sm501.c mem->start - mem->parent->start, start 182 drivers/usb/host/ohci-sm501.c release_mem_region(mem->start, resource_size(mem)); start 197 drivers/usb/host/ohci-sm501.c release_mem_region(mem->start, resource_size(mem)); start 79 drivers/usb/host/ohci-spear.c hcd->rsrc_start = pdev->resource[0].start; start 204 drivers/usb/host/ohci-st.c hcd->rsrc_start = res_mem->start; start 162 drivers/usb/host/ohci-tmio.c .start = ohci_tmio_start, start 211 drivers/usb/host/ohci-tmio.c hcd->rsrc_start = regs->start; start 218 drivers/usb/host/ohci-tmio.c tmio->ccr = ioremap(config->start, resource_size(config)); start 240 drivers/usb/host/ohci-tmio.c ret = usb_hcd_setup_local_mem(hcd, sram->start, sram->start, start 350 drivers/usb/host/oxu210hp-hcd.c unsigned short start; /* where polling starts */ start 1813 drivers/usb/host/oxu210hp-hcd.c qh->start = NO_FRAME; start 2360 drivers/usb/host/oxu210hp-hcd.c qh, qh->start, qh->usecs, qh->c_usecs); start 2366 drivers/usb/host/oxu210hp-hcd.c for (i = qh->start; i < 
oxu->periodic_size; i += period) { start 2435 drivers/usb/host/oxu210hp-hcd.c for (i = qh->start; i < oxu->periodic_size; i += period) start 2447 drivers/usb/host/oxu210hp-hcd.c qh, qh->start, qh->usecs, qh->c_usecs); start 2558 drivers/usb/host/oxu210hp-hcd.c frame = qh->start; start 2595 drivers/usb/host/oxu210hp-hcd.c qh->start = frame; start 4037 drivers/usb/host/oxu210hp-hcd.c .start = oxu_run, start 4227 drivers/usb/host/oxu210hp-hcd.c irq = res->start; start 4236 drivers/usb/host/oxu210hp-hcd.c memstart = res->start; start 2317 drivers/usb/host/r8a66597-hcd.c .start = r8a66597_start, start 2429 drivers/usb/host/r8a66597-hcd.c irq = ires->start; start 2432 drivers/usb/host/r8a66597-hcd.c reg = ioremap(res->start, resource_size(res)); start 2490 drivers/usb/host/r8a66597-hcd.c hcd->rsrc_start = res->start; start 407 drivers/usb/host/sl811-hcd.c sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF)); start 413 drivers/usb/host/sl811-hcd.c sl811->active_b = start(sl811, SL811_EP_B(SL811_HOST_BUF)); start 1558 drivers/usb/host/sl811-hcd.c .start = sl811h_start, start 1632 drivers/usb/host/sl811-hcd.c irq = ires->start; start 1650 drivers/usb/host/sl811-hcd.c addr_reg = (void __iomem *) (unsigned long) addr->start; start 1651 drivers/usb/host/sl811-hcd.c data_reg = (void __iomem *) (unsigned long) data->start; start 1653 drivers/usb/host/sl811-hcd.c addr_reg = ioremap(addr->start, 1); start 1659 drivers/usb/host/sl811-hcd.c data_reg = ioremap(data->start, 1); start 1672 drivers/usb/host/sl811-hcd.c hcd->rsrc_start = addr->start; start 98 drivers/usb/host/sl811_cs.c resources[0].start = irq; start 100 drivers/usb/host/sl811_cs.c resources[1].start = base_addr; start 103 drivers/usb/host/sl811_cs.c resources[2].start = base_addr + 1; start 167 drivers/usb/host/sl811_cs.c if (sl811_hc_init(parent, link->resource[0]->start, link->irq) start 111 drivers/usb/host/ssb-hcd.c hci_res[0].start = addr; start 112 drivers/usb/host/ssb-hcd.c hci_res[0].end = hci_res[0].start + len - 1; start 115 drivers/usb/host/ssb-hcd.c hci_res[1].start = dev->irq; start 153 drivers/usb/host/ssb-hcd.c int start, len; start 181 drivers/usb/host/ssb-hcd.c start = ssb_admatch_base(tmp); start 183 drivers/usb/host/ssb-hcd.c usb_dev->ohci_dev = ssb_hcd_create_pdev(dev, true, start, len); start 188 drivers/usb/host/ssb-hcd.c start = ssb_admatch_base(tmp) + 0x800; /* ehci core offset */ start 189 drivers/usb/host/ssb-hcd.c usb_dev->ehci_dev = ssb_hcd_create_pdev(dev, false, start, len); start 2946 drivers/usb/host/u132-hcd.c .start = u132_hcd_start, start 70 drivers/usb/host/uhci-grlib.c .start = uhci_start, start 115 drivers/usb/host/uhci-grlib.c hcd->rsrc_start = res.start; start 268 drivers/usb/host/uhci-pci.c .start = uhci_start, start 48 drivers/usb/host/uhci-platform.c .start = uhci_start, start 100 drivers/usb/host/uhci-platform.c hcd->rsrc_start = res->start; start 135 drivers/usb/host/uhci-platform.c ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); start 44 drivers/usb/host/xhci-ext-caps.c res.start = hcd->rsrc_start + cap_offset; start 45 drivers/usb/host/xhci-ext-caps.c res.end = res.start + USB_SW_RESOURCE_SIZE - 1; start 97 drivers/usb/host/xhci-ext-caps.h static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) start 103 drivers/usb/host/xhci-ext-caps.h offset = start; start 104 drivers/usb/host/xhci-ext-caps.h if (!start || start == XHCI_HCC_PARAMS_OFFSET) { start 116 drivers/usb/host/xhci-ext-caps.h if (offset != start && (id == 0 || XHCI_EXT_CAPS_ID(val) == id)) start 
253 drivers/usb/host/xhci-histb.c hcd->rsrc_start = res->start; start 515 drivers/usb/host/xhci-mtk.c hcd->rsrc_start = res->start; start 61 drivers/usb/host/xhci-mvebu.c base = ioremap(res->start, resource_size(res)); start 36 drivers/usb/host/xhci-plat.c .start = xhci_plat_start, start 229 drivers/usb/host/xhci-plat.c hcd->rsrc_start = res->start; start 323 drivers/usb/host/xhci-tegra.c static unsigned long extract_field(u32 value, unsigned int start, start 326 drivers/usb/host/xhci-tegra.c return (value >> start) & ((1 << count) - 1); start 634 drivers/usb/host/xhci-tegra.c value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT); start 1211 drivers/usb/host/xhci-tegra.c tegra->hcd->rsrc_start = regs->start; start 5309 drivers/usb/host/xhci.c .start = xhci_run, start 5371 drivers/usb/host/xhci.c if (over->start) start 5372 drivers/usb/host/xhci.c drv->start = over->start; start 1912 drivers/usb/host/xhci.h int (*start)(struct usb_hcd *hcd); start 311 drivers/usb/isp1760/isp1760-hcd.c priv->memory_pool[i].start = payload_addr; start 319 drivers/usb/isp1760/isp1760-hcd.c priv->memory_pool[curr + i].start = payload_addr; start 327 drivers/usb/isp1760/isp1760-hcd.c priv->memory_pool[curr + i].start = payload_addr; start 333 drivers/usb/isp1760/isp1760-hcd.c WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE); start 350 drivers/usb/isp1760/isp1760-hcd.c qtd->payload_addr = priv->memory_pool[i].start; start 365 drivers/usb/isp1760/isp1760-hcd.c if (priv->memory_pool[i].start == qtd->payload_addr) { start 2143 drivers/usb/isp1760/isp1760-hcd.c .start = isp1760_run, start 2208 drivers/usb/isp1760/isp1760-hcd.c hcd->rsrc_start = mem->start; start 38 drivers/usb/isp1760/isp1760-hcd.h unsigned int start; start 246 drivers/usb/isp1760/isp1760-if.c ret = isp1760_register(mem_res, irq_res->start, irqflags, &pdev->dev, start 2657 drivers/usb/misc/usbtest.c struct timespec64 start; start 2707 drivers/usb/misc/usbtest.c ktime_get_ts64(&start); start 2715 drivers/usb/misc/usbtest.c duration = timespec64_sub(end, start); start 186 drivers/usb/mtu3/mtu3.h struct qmu_gpd *start; start 211 drivers/usb/mtu3/mtu3_debugfs.c &ring->dma, ring->start, ring->end, start 230 drivers/usb/mtu3/mtu3_debugfs.c gpd = ring->start; start 123 drivers/usb/mtu3/mtu3_qmu.c struct qmu_gpd *gpd_head = ring->start; start 136 drivers/usb/mtu3/mtu3_qmu.c struct qmu_gpd *gpd_head = ring->start; start 148 drivers/usb/mtu3/mtu3_qmu.c ring->start = gpd; start 157 drivers/usb/mtu3/mtu3_qmu.c struct qmu_gpd *gpd = ring->start; start 185 drivers/usb/mtu3/mtu3_qmu.c ring->start, ring->dma); start 208 drivers/usb/mtu3/mtu3_qmu.c ring->enqueue = ring->start; start 218 drivers/usb/mtu3/mtu3_qmu.c ring->dequeue = ring->start; start 232 drivers/usb/mtu3/mtu3_qmu.c next = ring->start; start 565 drivers/usb/musb/da8xx.c musb_resources[0].start = pdev->resource[0].start; start 570 drivers/usb/musb/da8xx.c musb_resources[1].start = pdev->resource[1].start; start 534 drivers/usb/musb/davinci.c musb_resources[0].start = pdev->resource[0].start; start 539 drivers/usb/musb/davinci.c musb_resources[1].start = pdev->resource[1].start; start 548 drivers/usb/musb/davinci.c musb_resources[2].start = pdev->resource[2].start; start 751 drivers/usb/musb/musb_dsps.c (resources[0].start & 0xFFF) == 0x400 ? 
0 : 1); start 254 drivers/usb/musb/musb_host.c goto start; start 265 drivers/usb/musb/musb_host.c start: start 2691 drivers/usb/musb/musb_host.c .start = musb_h_start, start 460 drivers/usb/musb/omap2430.c musb_resources[0].start = pdev->resource[0].start; start 465 drivers/usb/musb/omap2430.c musb_resources[1].start = pdev->resource[1].start; start 470 drivers/usb/musb/omap2430.c musb_resources[2].start = pdev->resource[2].start; start 1105 drivers/usb/musb/tusb6010.c musb->async = mem->start; start 1114 drivers/usb/musb/tusb6010.c musb->sync = mem->start; start 1116 drivers/usb/musb/tusb6010.c sync = ioremap(mem->start, resource_size(mem)); start 1223 drivers/usb/musb/tusb6010.c musb_resources[0].start = pdev->resource[0].start; start 1228 drivers/usb/musb/tusb6010.c musb_resources[1].start = pdev->resource[1].start; start 1233 drivers/usb/musb/tusb6010.c musb_resources[2].start = pdev->resource[2].start; start 280 drivers/usb/musb/ux500.c musb_resources[0].start = pdev->resource[0].start; start 285 drivers/usb/musb/ux500.c musb_resources[1].start = pdev->resource[1].start; start 378 drivers/usb/musb/ux500_dma.c controller->phy_base = (dma_addr_t) iomem->start; start 857 drivers/usb/phy/phy-fsl-usb.c usb_dr_regs = ioremap(res->start, sizeof(struct usb_dr_mmap)); start 286 drivers/usb/phy/phy-gpio-vbus-usb.c irq = res->start; start 884 drivers/usb/phy/phy-isp1301-omap.c status = request_irq(otg_dev->resource[1].start, omap_otg_irq, start 898 drivers/usb/phy/phy-isp1301-omap.c free_irq(otg_dev->resource[1].start, isp); start 727 drivers/usb/phy/phy-mv-usb.c mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 742 drivers/usb/phy/phy-mv-usb.c mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); start 798 drivers/usb/phy/phy-mv-usb.c mvotg->irq = r->start; start 959 drivers/usb/phy/phy-tegra-usb.c tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, start 1080 drivers/usb/phy/phy-tegra-usb.c tegra_phy->regs = devm_ioremap(&pdev->dev, res->start, start 485 drivers/usb/renesas_usbhs/common.c usbhs_mod_call(priv, start, priv); start 674 drivers/usb/renesas_usbhs/common.c priv->irq = irq_res->start; start 33 drivers/usb/renesas_usbhs/mod.h int (*start)(struct usbhs_priv *priv); start 76 drivers/usb/renesas_usbhs/mod_gadget.c #define __usbhsg_for_each_uep(start, pos, g, i) \ start 77 drivers/usb/renesas_usbhs/mod_gadget.c for ((i) = start; \ start 1116 drivers/usb/renesas_usbhs/mod_gadget.c gpriv->mod.start = usbhsg_start; start 104 drivers/usb/renesas_usbhs/mod_host.c #define __usbhsh_for_each_udev(start, pos, h, i) \ start 105 drivers/usb/renesas_usbhs/mod_host.c for ((i) = start; \ start 1288 drivers/usb/renesas_usbhs/mod_host.c .start = usbhsh_host_start, start 1551 drivers/usb/renesas_usbhs/mod_host.c hpriv->mod.start = usbhsh_start; start 48 drivers/usb/renesas_usbhs/pipe.h #define __usbhs_for_each_pipe(start, pos, info, i) \ start 49 drivers/usb/renesas_usbhs/pipe.h for ((i) = start; \ start 164 drivers/usb/roles/intel-xhci-usb-role-switch.c data->base = devm_ioremap_nocache(dev, res->start, resource_size(res)); start 93 drivers/usb/typec/ucsi/ucsi_acpi.c ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 1309 drivers/usb/usbip/vhci_hcd.c .start = vhci_start, start 140 drivers/vfio/pci/vfio_pci.c if (!(res->start & ~PAGE_MASK)) { start 151 drivers/vfio/pci/vfio_pci.c dummy_res->resource.start = res->end + 1; start 152 drivers/vfio/pci/vfio_pci.c dummy_res->resource.end = res->start + PAGE_SIZE - 1; start 931 
drivers/vfio/pci/vfio_pci.c hdr.start, hdr.count, data); start 1062 drivers/vfio/pci/vfio_pci_config.c int start; start 1064 drivers/vfio/pci/vfio_pci_config.c start = vfio_find_cap_start(vdev, pos); start 1066 drivers/vfio/pci/vfio_pci_config.c flags = (__le16 *)&vdev->vconfig[start]; start 1087 drivers/vfio/pci/vfio_pci_config.c int start, ret; start 1089 drivers/vfio/pci/vfio_pci_config.c start = vfio_find_cap_start(vdev, pos); start 1091 drivers/vfio/pci/vfio_pci_config.c pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS]; start 1108 drivers/vfio/pci/vfio_pci_config.c start + PCI_MSI_FLAGS, start 354 drivers/vfio/pci/vfio_pci_intrs.c static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start, start 359 drivers/vfio/pci/vfio_pci_intrs.c if (start >= vdev->num_ctx || start + count > vdev->num_ctx) start 362 drivers/vfio/pci/vfio_pci_intrs.c for (i = 0, j = start; i < count && !ret; i++, j++) { start 368 drivers/vfio/pci/vfio_pci_intrs.c for (--j; j >= (int)start; j--) start 405 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, start 408 drivers/vfio/pci/vfio_pci_intrs.c if (!is_intx(vdev) || start != 0 || count != 1) start 432 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, start 435 drivers/vfio/pci/vfio_pci_intrs.c if (!is_intx(vdev) || start != 0 || count != 1) start 452 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, start 460 drivers/vfio/pci/vfio_pci_intrs.c if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1) start 495 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, start 514 drivers/vfio/pci/vfio_pci_intrs.c return vfio_msi_set_block(vdev, start, count, start 517 drivers/vfio/pci/vfio_pci_intrs.c ret = vfio_msi_enable(vdev, start + count, msix); start 521 drivers/vfio/pci/vfio_pci_intrs.c ret = vfio_msi_set_block(vdev, start, count, fds, msix); start 528 drivers/vfio/pci/vfio_pci_intrs.c if (!irq_is(vdev, index) || start + count > vdev->num_ctx) start 531 drivers/vfio/pci/vfio_pci_intrs.c for (i = start; i < start + count; i++) { start 538 drivers/vfio/pci/vfio_pci_intrs.c if (bools[i - start]) start 601 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, start 604 drivers/vfio/pci/vfio_pci_intrs.c if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) start 612 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, start 615 drivers/vfio/pci/vfio_pci_intrs.c if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) start 623 drivers/vfio/pci/vfio_pci_intrs.c unsigned index, unsigned start, unsigned count, start 627 drivers/vfio/pci/vfio_pci_intrs.c unsigned start, unsigned count, uint32_t flags, start 676 drivers/vfio/pci/vfio_pci_intrs.c return func(vdev, index, start, count, flags, data); start 138 drivers/vfio/pci/vfio_pci_private.h unsigned start, unsigned count, void *data); start 157 drivers/vfio/platform/vfio_platform_common.c vdev->regions[i].addr = res->start; start 391 drivers/vfio/platform/vfio_platform_common.c hdr.start, hdr.count, data); start 42 drivers/vfio/platform/vfio_platform_irq.c unsigned index, unsigned start, start 46 drivers/vfio/platform/vfio_platform_irq.c if (start != 0 || count != 1) start 102 drivers/vfio/platform/vfio_platform_irq.c unsigned index, unsigned start, start 106 drivers/vfio/platform/vfio_platform_irq.c if (start != 0 || count != 1) start 219 drivers/vfio/platform/vfio_platform_irq.c unsigned index, unsigned start, start 234 drivers/vfio/platform/vfio_platform_irq.c if (start != 0 || count 
!= 1) start 257 drivers/vfio/platform/vfio_platform_irq.c uint32_t flags, unsigned index, unsigned start, start 261 drivers/vfio/platform/vfio_platform_irq.c unsigned start, unsigned count, uint32_t flags, start 279 drivers/vfio/platform/vfio_platform_irq.c return func(vdev, index, start, count, flags, data); start 91 drivers/vfio/platform/vfio_platform_private.h unsigned start, unsigned count, start 1863 drivers/vfio/vfio.c (hdr->count >= (U32_MAX - hdr->start)) || start 1871 drivers/vfio/vfio.c if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs) start 211 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long start = tbl->it_offset; start 212 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long end = start + tbl->it_size; start 214 drivers/vfio/vfio_iommu_spapr_tce.c if ((start <= entry) && (entry < end)) { start 103 drivers/vfio/vfio_iommu_type1.c dma_addr_t start; start 135 drivers/vfio/vfio_iommu_type1.c dma_addr_t start, size_t size) start 142 drivers/vfio/vfio_iommu_type1.c if (start + size <= dma->iova) start 144 drivers/vfio/vfio_iommu_type1.c else if (start >= dma->iova + dma->size) start 1054 drivers/vfio/vfio_iommu_type1.c dma_addr_t start, dma_addr_t end) start 1060 drivers/vfio/vfio_iommu_type1.c if (start >= node->start && end <= node->end) start 1326 drivers/vfio/vfio_iommu_type1.c *base = region->start; start 1440 drivers/vfio/vfio_iommu_type1.c dma_addr_t start, dma_addr_t end) start 1449 drivers/vfio/vfio_iommu_type1.c region->start = start; start 1461 drivers/vfio/vfio_iommu_type1.c dma_addr_t start, dma_addr_t end) start 1472 drivers/vfio/vfio_iommu_type1.c if (start > last->end || end < first->start) start 1476 drivers/vfio/vfio_iommu_type1.c if (start > first->start) { start 1477 drivers/vfio/vfio_iommu_type1.c if (vfio_find_dma(iommu, first->start, start - first->start)) start 1495 drivers/vfio/vfio_iommu_type1.c dma_addr_t start, dma_addr_t end) start 1500 drivers/vfio/vfio_iommu_type1.c return vfio_iommu_iova_insert(iova, start, end); start 1504 drivers/vfio/vfio_iommu_type1.c if (start < node->start) start 1506 drivers/vfio/vfio_iommu_type1.c if (start >= node->start && start < node->end) { start 1507 drivers/vfio/vfio_iommu_type1.c node->start = start; start 1519 drivers/vfio/vfio_iommu_type1.c if (end > node->start && end <= node->end) { start 1544 drivers/vfio/vfio_iommu_type1.c if (vfio_find_dma(iommu, region->start, region->length)) start 1562 drivers/vfio/vfio_iommu_type1.c phys_addr_t start, end; start 1567 drivers/vfio/vfio_iommu_type1.c start = resv->start; start 1568 drivers/vfio/vfio_iommu_type1.c end = resv->start + resv->length - 1; start 1574 drivers/vfio/vfio_iommu_type1.c if (start > n->end || end < n->start) start 1583 drivers/vfio/vfio_iommu_type1.c if (start > n->start) start 1584 drivers/vfio/vfio_iommu_type1.c ret = vfio_iommu_iova_insert(&n->list, n->start, start 1585 drivers/vfio/vfio_iommu_type1.c start - 1); start 1631 drivers/vfio/vfio_iommu_type1.c ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); start 1913 drivers/vfio/vfio_iommu_type1.c dma_addr_t start = 0; start 1922 drivers/vfio/vfio_iommu_type1.c if (geo.aperture_start > start) start 1923 drivers/vfio/vfio_iommu_type1.c start = geo.aperture_start; start 1930 drivers/vfio/vfio_iommu_type1.c node->start = start; start 1947 drivers/vfio/vfio_iommu_type1.c dma_addr_t start, end; start 1964 drivers/vfio/vfio_iommu_type1.c start = node->start; start 1971 drivers/vfio/vfio_iommu_type1.c ret = vfio_iommu_aper_resize(iova_copy, start, end); start 2201 
drivers/vfio/vfio_iommu_type1.c cap_iovas->iova_ranges[i].start = iova->start; start 731 drivers/vhost/vhost.c node->start, start 747 drivers/vhost/vhost.c return (void *)(uintptr_t)(node->userspace_addr + addr - node->start); start 1018 drivers/vhost/vhost.c u64 start, u64 size, u64 end, start 1035 drivers/vhost/vhost.c node->start = start; start 1049 drivers/vhost/vhost.c u64 start, u64 end) start 1054 drivers/vhost/vhost.c start, end))) start 1241 drivers/vhost/vhost.c void *start = &node->msg; start 1257 drivers/vhost/vhost.c ret = copy_to_iter(start, size, to); start 1332 drivers/vhost/vhost.c if (node == NULL || node->start > addr) { start 1342 drivers/vhost/vhost.c size = node->size - addr + node->start; start 1874 drivers/vhost/vhost.c u64 start, end, l, min; start 1887 drivers/vhost/vhost.c start = max(u->userspace_addr, hva); start 1890 drivers/vhost/vhost.c l = end - start + 1; start 1892 drivers/vhost/vhost.c u->start + start - u->userspace_addr, start 2059 drivers/vhost/vhost.c if (node == NULL || node->start > addr) { start 2072 drivers/vhost/vhost.c size = node->size - addr + node->start; start 2075 drivers/vhost/vhost.c (node->userspace_addr + addr - node->start); start 2375 drivers/vhost/vhost.c int start; start 2377 drivers/vhost/vhost.c start = vq->last_used_idx & (vq->num - 1); start 2378 drivers/vhost/vhost.c used = vq->used->ring + start; start 2379 drivers/vhost/vhost.c if (vhost_put_used(vq, heads, start, count)) { start 2406 drivers/vhost/vhost.c int start, n, r; start 2408 drivers/vhost/vhost.c start = vq->last_used_idx & (vq->num - 1); start 2409 drivers/vhost/vhost.c n = vq->num - start; start 55 drivers/vhost/vhost.h #define START(node) ((node)->start) start 61 drivers/vhost/vhost.h __u64 start; start 113 drivers/vhost/vringh.c if (addr < range->start || addr > range->end_incl) { start 117 drivers/vhost/vringh.c BUG_ON(addr < range->start || addr > range->end_incl); start 735 drivers/vhost/vsock.c int start; start 744 drivers/vhost/vsock.c if (copy_from_user(&start, argp, sizeof(start))) start 746 drivers/vhost/vsock.c if (start) start 211 drivers/video/backlight/88pm860x_bl.c data->reg_duty_cycle = res->start; start 217 drivers/video/backlight/88pm860x_bl.c data->reg_always_on = res->start; start 223 drivers/video/backlight/88pm860x_bl.c data->reg_current = res->start; start 84 drivers/video/backlight/ep93xx_bl.c ep93xxbl->mmio = devm_ioremap(&dev->dev, res->start, start 102 drivers/video/backlight/lp855x_bl.c u8 start, end; start 109 drivers/video/backlight/lp855x_bl.c start = LP855X_EEPROM_START; start 113 drivers/video/backlight/lp855x_bl.c start = LP8556_EPROM_START; start 117 drivers/video/backlight/lp855x_bl.c start = LP8555_EPROM_START; start 121 drivers/video/backlight/lp855x_bl.c start = LP8557_EPROM_START; start 128 drivers/video/backlight/lp855x_bl.c return addr >= start && addr <= end; start 151 drivers/video/backlight/max8925_bl.c data->reg_mode_cntl = res->start; start 157 drivers/video/backlight/max8925_bl.c data->reg_cntl = res->start; start 708 drivers/video/console/newport_con.c if (!dev->resource.start) start 714 drivers/video/console/newport_con.c newport_addr = dev->resource.start + 0xF0000; start 253 drivers/video/console/sticore.c static void sti_flush(unsigned long start, unsigned long end) start 255 drivers/video/console/sticore.c flush_icache_range(start, end); start 947 drivers/video/console/sticore.c int hpa = dev->hpa.start; start 288 drivers/video/console/vgacon.c int start, end, count, soff; start 306 
drivers/video/console/vgacon.c start = vgacon_scrollback_cur->cur + lines; start 307 drivers/video/console/vgacon.c end = start + abs(lines); start 309 drivers/video/console/vgacon.c if (start < 0) start 310 drivers/video/console/vgacon.c start = 0; start 312 drivers/video/console/vgacon.c if (start > vgacon_scrollback_cur->cnt) start 313 drivers/video/console/vgacon.c start = vgacon_scrollback_cur->cnt; start 321 drivers/video/console/vgacon.c vgacon_scrollback_cur->cur = start; start 322 drivers/video/console/vgacon.c count = end - start; start 330 drivers/video/console/vgacon.c count = vgacon_scrollback_cur->cnt - start; start 433 drivers/video/console/vgacon.c .start = 0x3B0, start 444 drivers/video/console/vgacon.c .start = 0x3B0, start 449 drivers/video/console/vgacon.c .start = 0x3BF, start 475 drivers/video/console/vgacon.c .start = 0x3C0, start 485 drivers/video/console/vgacon.c .start = 0x3C0, start 520 drivers/video/console/vgacon.c .start = 0x3D4, start 565 drivers/video/fbdev/acornfb.c unsigned long start, size; start 568 drivers/video/fbdev/acornfb.c start = info->fix.smem_start; start 579 drivers/video/fbdev/acornfb.c iomd_writel(start, IOMD_VIDSTART); start 466 drivers/video/fbdev/amba-clcd.c fb->fb.fix.mmio_start = fb->dev->res.start; start 910 drivers/video/fbdev/amba-clcd.c (unsigned long long)dev->res.start); start 997 drivers/video/fbdev/arkfb.c bus_reg.start = 0; start 1004 drivers/video/fbdev/arkfb.c par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start; start 2682 drivers/video/fbdev/atafb.c cmap.start = 0; start 1126 drivers/video/fbdev/atmel_lcdfb.c info->fix.smem_start = map->start; start 1155 drivers/video/fbdev/atmel_lcdfb.c info->fix.mmio_start = regs->start; start 1946 drivers/video/fbdev/aty/atyfb_base.c unsigned long start = par->mmap_map[i].voff; start 1947 drivers/video/fbdev/aty/atyfb_base.c unsigned long end = start + par->mmap_map[i].size; start 1950 drivers/video/fbdev/aty/atyfb_base.c if (start > offset) start 1955 drivers/video/fbdev/aty/atyfb_base.c map_size = par->mmap_map[i].size - (offset - start); start 1956 drivers/video/fbdev/aty/atyfb_base.c map_offset = par->mmap_map[i].poff + (offset - start); start 2933 drivers/video/fbdev/aty/atyfb_base.c for (i = 0; i < 6 && pdev->resource[i].start; i++) start 2943 drivers/video/fbdev/aty/atyfb_base.c for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) { start 2949 drivers/video/fbdev/aty/atyfb_base.c base = rp->start; start 3405 drivers/video/fbdev/aty/atyfb_base.c request_mem_region(rrp->start, resource_size(rrp), "atyfb")) { start 3406 drivers/video/fbdev/aty/atyfb_base.c par->aux_start = rrp->start; start 3408 drivers/video/fbdev/aty/atyfb_base.c raddr = rrp->start; start 3512 drivers/video/fbdev/aty/atyfb_base.c addr = rp->start; start 3517 drivers/video/fbdev/aty/atyfb_base.c res_start = rp->start; start 1276 drivers/video/fbdev/aty/radeon_base.c int i, start, rc = 0; start 1297 drivers/video/fbdev/aty/radeon_base.c start = cmap->start; start 1307 drivers/video/fbdev/aty/radeon_base.c rc = radeon_setcolreg (start++, hred, hgreen, hblue, htransp, start 433 drivers/video/fbdev/au1100fb.c au1100fb_fix.mmio_start = regs_res->start; start 292 drivers/video/fbdev/bw2.c info->fix.smem_start = op->resource[0].start; start 492 drivers/video/fbdev/cg14.c info->fix.smem_start = op->resource[0].start; start 495 drivers/video/fbdev/cg14.c info->fix.smem_start = op->resource[1].start; start 512 drivers/video/fbdev/cg14.c is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) == start 
526 drivers/video/fbdev/cg14.c (op->resource[0].start - start 527 drivers/video/fbdev/cg14.c op->resource[1].start); start 366 drivers/video/fbdev/cg3.c info->fix.smem_start = op->resource[0].start; start 758 drivers/video/fbdev/cg6.c info->fix.smem_start = op->resource[0].start; start 2105 drivers/video/fbdev/cirrusfb.c (unsigned long long)pdev->resource[0].start, cinfo->btype); start 2107 drivers/video/fbdev/cirrusfb.c (unsigned long long)pdev->resource[1].start); start 232 drivers/video/fbdev/clps711x-fb.c cfb->base = devm_ioremap(dev, res->start, resource_size(res)); start 238 drivers/video/fbdev/clps711x-fb.c info->fix.mmio_start = res->start; start 249 drivers/video/fbdev/clps711x-fb.c if (res->start & 0x0fffffff) { start 261 drivers/video/fbdev/clps711x-fb.c info->fix.smem_start = res->start; start 297 drivers/video/fbdev/cobalt_lcdfb.c info->screen_base = devm_ioremap(&dev->dev, res->start, start 306 drivers/video/fbdev/cobalt_lcdfb.c info->fix.smem_start = res->start; start 289 drivers/video/fbdev/controlfb.c unsigned long start; start 292 drivers/video/fbdev/controlfb.c start = info->fix.smem_start; start 294 drivers/video/fbdev/controlfb.c mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; start 299 drivers/video/fbdev/controlfb.c start = info->fix.mmio_start; start 307 drivers/video/fbdev/controlfb.c return vm_iomap_memory(vma, start, len); start 691 drivers/video/fbdev/controlfb.c p->fb_orig_base = fb_res.start; start 694 drivers/video/fbdev/controlfb.c p->frame_buffer_phys = fb_res.start + 0x800000; start 695 drivers/video/fbdev/controlfb.c p->control_regs_phys = reg_res.start; start 24 drivers/video/fbdev/core/fb_ddc.c unsigned char start = 0x0; start 31 drivers/video/fbdev/core/fb_ddc.c .buf = &start, start 68 drivers/video/fbdev/core/fb_defio.c int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 72 drivers/video/fbdev/core/fb_defio.c int err = file_write_and_wait_range(file, start, end); start 121 drivers/video/fbdev/core/fbcmap.c cmap->start = 0; start 171 drivers/video/fbdev/core/fbcmap.c if (to->start > from->start) start 172 drivers/video/fbdev/core/fbcmap.c fromoff = to->start - from->start; start 174 drivers/video/fbdev/core/fbcmap.c tooff = from->start - to->start; start 196 drivers/video/fbdev/core/fbcmap.c if (to->start > from->start) start 197 drivers/video/fbdev/core/fbcmap.c fromoff = to->start - from->start; start 199 drivers/video/fbdev/core/fbcmap.c tooff = from->start - to->start; start 233 drivers/video/fbdev/core/fbcmap.c int i, start, rc = 0; start 241 drivers/video/fbdev/core/fbcmap.c start = cmap->start; start 243 drivers/video/fbdev/core/fbcmap.c if (start < 0 || (!info->fbops->fb_setcolreg && start 255 drivers/video/fbdev/core/fbcmap.c if (info->fbops->fb_setcolreg(start++, start 287 drivers/video/fbdev/core/fbcmap.c umap.start = cmap->start; start 1661 drivers/video/fbdev/core/fbcon.c unsigned short *start; start 1667 drivers/video/fbdev/core/fbcon.c start = s; start 1673 drivers/video/fbdev/core/fbcon.c if (s > start) { start 1674 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, start 1676 drivers/video/fbdev/core/fbcon.c x += s - start; start 1677 drivers/video/fbdev/core/fbcon.c start = s; start 1681 drivers/video/fbdev/core/fbcon.c if (s > start) { start 1682 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, start 1684 drivers/video/fbdev/core/fbcon.c x += s - start + 1; start 1685 drivers/video/fbdev/core/fbcon.c start = s + 1; start 1688 
drivers/video/fbdev/core/fbcon.c start++; start 1694 drivers/video/fbdev/core/fbcon.c if (s > start) start 1695 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, line, x); start 1715 drivers/video/fbdev/core/fbcon.c unsigned short *start = s; start 1725 drivers/video/fbdev/core/fbcon.c if (s > start) { start 1726 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, start 1728 drivers/video/fbdev/core/fbcon.c x += s - start; start 1729 drivers/video/fbdev/core/fbcon.c start = s; start 1735 drivers/video/fbdev/core/fbcon.c if (s > start) start 1736 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, dy, x); start 1752 drivers/video/fbdev/core/fbcon.c unsigned short *start = s; start 1761 drivers/video/fbdev/core/fbcon.c if (s > start) { start 1763 drivers/video/fbdev/core/fbcon.c line, x, 1, s-start); start 1764 drivers/video/fbdev/core/fbcon.c x += s - start + 1; start 1765 drivers/video/fbdev/core/fbcon.c start = s + 1; start 1768 drivers/video/fbdev/core/fbcon.c start++; start 1777 drivers/video/fbdev/core/fbcon.c if (s > start) start 1779 drivers/video/fbdev/core/fbcon.c s-start); start 1800 drivers/video/fbdev/core/fbcon.c unsigned short *start = s; start 1810 drivers/video/fbdev/core/fbcon.c if (s > start) { start 1811 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, start 1813 drivers/video/fbdev/core/fbcon.c x += s - start; start 1814 drivers/video/fbdev/core/fbcon.c start = s; start 1818 drivers/video/fbdev/core/fbcon.c if (s > start) { start 1819 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, start 1821 drivers/video/fbdev/core/fbcon.c x += s - start + 1; start 1822 drivers/video/fbdev/core/fbcon.c start = s + 1; start 1825 drivers/video/fbdev/core/fbcon.c start++; start 1833 drivers/video/fbdev/core/fbcon.c if (s > start) start 1834 drivers/video/fbdev/core/fbcon.c fbcon_putcs(vc, start, s - start, line, x); start 2750 drivers/video/fbdev/core/fbcon.c palette_cmap.start = 0; start 203 drivers/video/fbdev/core/fbmem.c palette_cmap.start = 0; start 215 drivers/video/fbdev/core/fbmem.c palette_cmap.start = 32 + i; start 732 drivers/video/fbdev/core/fbmem.c .start = fb_seq_start, start 1202 drivers/video/fbdev/core/fbmem.c u32 start; start 1221 drivers/video/fbdev/core/fbmem.c if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32))) start 1237 drivers/video/fbdev/core/fbmem.c if (copy_in_user(&cmap32->start, start 1238 drivers/video/fbdev/core/fbmem.c &cmap->start, start 1337 drivers/video/fbdev/core/fbmem.c unsigned long start; start 1361 drivers/video/fbdev/core/fbmem.c start = info->fix.smem_start; start 1363 drivers/video/fbdev/core/fbmem.c mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; start 1371 drivers/video/fbdev/core/fbmem.c start = info->fix.mmio_start; start 1382 drivers/video/fbdev/core/fbmem.c fb_pgprotect(file, vma, start); start 1384 drivers/video/fbdev/core/fbmem.c return vm_iomap_memory(vma, start, len); start 301 drivers/video/fbdev/da8xx-fb.c u32 start; start 314 drivers/video/fbdev/da8xx-fb.c start = par->dma_start; start 329 drivers/video/fbdev/da8xx-fb.c lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); start 331 drivers/video/fbdev/da8xx-fb.c lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); start 334 drivers/video/fbdev/da8xx-fb.c start = par->p_palette_base; start 335 drivers/video/fbdev/da8xx-fb.c end = start + par->palette_sz - 1; start 347 drivers/video/fbdev/da8xx-fb.c lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); start 1217 
drivers/video/fbdev/da8xx-fb.c unsigned int start; start 1230 drivers/video/fbdev/da8xx-fb.c start = fix->smem_start + start 1233 drivers/video/fbdev/da8xx-fb.c end = start + fbi->var.yres * fix->line_length - 1; start 1234 drivers/video/fbdev/da8xx-fb.c par->dma_start = start; start 396 drivers/video/fbdev/efifb.c bar_resource->start + bar_offset != efifb_fix.smem_start) { start 399 drivers/video/fbdev/efifb.c efifb_fix.smem_start = bar_resource->start + bar_offset; start 662 drivers/video/fbdev/efifb.c if (res->start <= base && res->end >= base + size - 1) { start 663 drivers/video/fbdev/efifb.c record_efifb_bar_resource(dev, i, base - res->start); start 230 drivers/video/fbdev/ep93xx-fb.c unsigned int vlines_total, hclks_total, start, stop; start 241 drivers/video/fbdev/ep93xx-fb.c start = vlines_total; start 243 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VSYNC); start 245 drivers/video/fbdev/ep93xx-fb.c start = vlines_total - info->var.vsync_len - info->var.upper_margin; start 247 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VBLANK); start 248 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VACTIVE); start 250 drivers/video/fbdev/ep93xx-fb.c start = vlines_total; start 252 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VCLK); start 254 drivers/video/fbdev/ep93xx-fb.c start = hclks_total; start 256 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HSYNC); start 258 drivers/video/fbdev/ep93xx-fb.c start = hclks_total - info->var.hsync_len - info->var.left_margin; start 260 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HBLANK); start 261 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HACTIVE); start 263 drivers/video/fbdev/ep93xx-fb.c start = hclks_total; start 265 drivers/video/fbdev/ep93xx-fb.c ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HCLK); start 506 drivers/video/fbdev/ep93xx-fb.c fbi->mmio_base = devm_ioremap(&pdev->dev, res->start, start 682 drivers/video/fbdev/fb-puv3.c unifb_fix.mmio_start = iomem->start; start 927 drivers/video/fbdev/ffb.c par->physbase = op->resource[0].start; start 199 drivers/video/fbdev/goldfishfb.c fb->reg_base = ioremap(r->start, PAGE_SIZE); start 383 drivers/video/fbdev/grvga.c if (!devm_request_mem_region(&dev->dev, dev->resource[0].start, start 333 drivers/video/fbdev/hpfb.c paddr = d->resource.start; start 334 drivers/video/fbdev/hpfb.c if (!request_mem_region(d->resource.start, resource_size(&d->resource), d->name)) start 357 drivers/video/fbdev/hpfb.c release_mem_region(d->resource.start, resource_size(&d->resource)); start 706 drivers/video/fbdev/hyperv_fb.c fb_virt = ioremap(par->mem->start, screen_fb_size); start 724 drivers/video/fbdev/hyperv_fb.c info->fix.smem_start = par->mem->start; start 737 drivers/video/fbdev/hyperv_fb.c vmbus_free_mmio(par->mem->start, screen_fb_size); start 752 drivers/video/fbdev/hyperv_fb.c vmbus_free_mmio(par->mem->start, screen_fb_size); start 929 drivers/video/fbdev/imxfb.c res = request_mem_region(res->start, resource_size(res), start 969 drivers/video/fbdev/imxfb.c fbi->regs = ioremap(res->start, resource_size(res)); start 1054 drivers/video/fbdev/imxfb.c release_mem_region(res->start, resource_size(res)); start 1082 drivers/video/fbdev/imxfb.c release_mem_region(res->start, resource_size(res)); start 567 
drivers/video/fbdev/leo.c info->fix.smem_start = op->resource[0].start; start 211 drivers/video/fbdev/matrox/matroxfb_accel.c int start, end; start 225 drivers/video/fbdev/matrox/matroxfb_accel.c start = sy*vxres+sx+curr_ydstorg(minfo); start 226 drivers/video/fbdev/matrox/matroxfb_accel.c end = start+width; start 235 drivers/video/fbdev/matrox/matroxfb_accel.c start = end+width; start 241 drivers/video/fbdev/matrox/matroxfb_accel.c mga_outl(M_AR3, start); start 253 drivers/video/fbdev/matrox/matroxfb_accel.c int start, end; start 267 drivers/video/fbdev/matrox/matroxfb_accel.c start = sy*vxres+sx+curr_ydstorg(minfo); start 268 drivers/video/fbdev/matrox/matroxfb_accel.c end = start+width; start 277 drivers/video/fbdev/matrox/matroxfb_accel.c start = end+width; start 283 drivers/video/fbdev/matrox/matroxfb_accel.c mga_outl(M_AR3, start); start 829 drivers/video/fbdev/matrox/matroxfb_base.c minfo->outputs[out].output->start) { start 830 drivers/video/fbdev/matrox/matroxfb_base.c minfo->outputs[out].output->start(minfo->outputs[out].data); start 301 drivers/video/fbdev/matrox/matroxfb_base.h int (*start)(void* altout_dev); start 385 drivers/video/fbdev/matrox/matroxfb_crtc2.c minfo->outputs[out].output->start) { start 386 drivers/video/fbdev/matrox/matroxfb_crtc2.c minfo->outputs[out].output->start(minfo->outputs[out].data); start 1194 drivers/video/fbdev/matrox/matroxfb_maven.c .start = maven_out_start, start 698 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c par->res = request_mem_region(res.start, res_size, DRV_NAME); start 709 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c par->fb_base_phys = res.start; start 710 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c par->mmio_base_phys = res.start + MB862XX_MMIO_BASE; start 774 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c release_mem_region(res.start, res_size); start 810 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c release_mem_region(par->res->start, res_size); start 884 drivers/video/fbdev/mbx/mbxfb.c #define res_size(_r) (((_r)->end - (_r)->start) + 1) start 923 drivers/video/fbdev/mbx/mbxfb.c mfbi->fb_req = request_mem_region(mfbi->fb_res->start, start 930 drivers/video/fbdev/mbx/mbxfb.c mfbi->fb_phys_addr = mfbi->fb_res->start; start 932 drivers/video/fbdev/mbx/mbxfb.c mfbi->reg_req = request_mem_region(mfbi->reg_res->start, start 939 drivers/video/fbdev/mbx/mbxfb.c mfbi->reg_phys_addr = mfbi->reg_res->start; start 1000 drivers/video/fbdev/mbx/mbxfb.c release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res)); start 1002 drivers/video/fbdev/mbx/mbxfb.c release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res)); start 1027 drivers/video/fbdev/mbx/mbxfb.c release_mem_region(mfbi->reg_req->start, start 1030 drivers/video/fbdev/mbx/mbxfb.c release_mem_region(mfbi->fb_req->start, start 148 drivers/video/fbdev/metronomefb.c static u8 calc_cksum(int start, int end, u8 *mem) start 153 drivers/video/fbdev/metronomefb.c for (i = start; i < end; i++) start 159 drivers/video/fbdev/metronomefb.c static u16 calc_img_cksum(u16 *start, int length) start 164 drivers/video/fbdev/metronomefb.c tmp += *start++; start 480 drivers/video/fbdev/mmp/hw/mmp_ctrl.c if (!devm_request_mem_region(ctrl->dev, res->start, start 489 drivers/video/fbdev/mmp/hw/mmp_ctrl.c res->start, resource_size(res)); start 404 drivers/video/fbdev/mmp/hw/mmp_ctrl.h #define CFG_SPI_START(start) (start) start 1576 drivers/video/fbdev/mx3fb.c mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg)); start 1685 drivers/video/fbdev/nvidia/nv_hw.c void NVSetStartAddress(struct nvidia_par 
*par, u32 start) start 1687 drivers/video/fbdev/nvidia/nv_hw.c NV_WR32(par->PCRTC, 0x800, start); start 298 drivers/video/fbdev/omap/omapfb_main.c index = cmap->start; start 48 drivers/video/fbdev/omap2/omapfb/dss/dispc.c #define REG_GET(idx, start, end) \ start 49 drivers/video/fbdev/omap2/omapfb/dss/dispc.c FLD_GET(dispc_read_reg(idx), start, end) start 51 drivers/video/fbdev/omap2/omapfb/dss/dispc.c #define REG_FLD_MOD(idx, val, start, end) \ start 52 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end)) start 1148 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u8 start, end; start 1154 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); start 1157 drivers/video/fbdev/omap2/omapfb/dss/dispc.c size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end); start 4063 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc.base = devm_ioremap(&pdev->dev, dispc_mem->start, start 110 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define REG_GET(dsidev, idx, start, end) \ start 111 drivers/video/fbdev/omap2/omapfb/dss/dsi.c FLD_GET(dsi_read_reg(dsidev, idx), start, end) start 113 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define REG_FLD_MOD(dsidev, idx, val, start, end) \ start 114 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end)) start 1196 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_FLD_GET(fld, start, end)\ start 1197 drivers/video/fbdev/omap2/omapfb/dss/dsi.c FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end) start 5301 drivers/video/fbdev/omap2/omapfb/dss/dsi.c temp_res.start = res->start; start 5302 drivers/video/fbdev/omap2/omapfb/dss/dsi.c temp_res.end = temp_res.start + DSI_PROTO_SZ - 1; start 5308 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi->proto_base = devm_ioremap(&dsidev->dev, res->start, start 5323 drivers/video/fbdev/omap2/omapfb/dss/dsi.c temp_res.start = res->start + DSI_PHY_OFFSET; start 5324 drivers/video/fbdev/omap2/omapfb/dss/dsi.c temp_res.end = temp_res.start + DSI_PHY_SZ - 1; start 5328 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi->phy_base = devm_ioremap(&dsidev->dev, res->start, start 5343 drivers/video/fbdev/omap2/omapfb/dss/dsi.c temp_res.start = res->start + DSI_PLL_OFFSET; start 5344 drivers/video/fbdev/omap2/omapfb/dss/dsi.c temp_res.end = temp_res.start + DSI_PLL_SZ - 1; start 5348 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi->pll_base = devm_ioremap(&dsidev->dev, res->start, start 5380 drivers/video/fbdev/omap2/omapfb/dss/dsi.c while (d->address != 0 && d->address != dsi_mem->start) start 55 drivers/video/fbdev/omap2/omapfb/dss/dss.c #define REG_GET(idx, start, end) \ start 56 drivers/video/fbdev/omap2/omapfb/dss/dss.c FLD_GET(dss_read_reg(idx), start, end) start 58 drivers/video/fbdev/omap2/omapfb/dss/dss.c #define REG_FLD_MOD(idx, val, start, end) \ start 59 drivers/video/fbdev/omap2/omapfb/dss/dss.c dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end)) start 398 drivers/video/fbdev/omap2/omapfb/dss/dss.c u8 start, end; start 415 drivers/video/fbdev/omap2/omapfb/dss/dss.c dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end); start 417 drivers/video/fbdev/omap2/omapfb/dss/dss.c REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */ start 1086 drivers/video/fbdev/omap2/omapfb/dss/dss.c dss.base = devm_ioremap(&pdev->dev, dss_mem->start, start 59 drivers/video/fbdev/omap2/omapfb/dss/dss.h #define FLD_MASK(start, end) (((1 << ((start) - (end) 
+ 1)) - 1) << (end)) start 60 drivers/video/fbdev/omap2/omapfb/dss/dss.h #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) start 61 drivers/video/fbdev/omap2/omapfb/dss/dss.h #define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) start 62 drivers/video/fbdev/omap2/omapfb/dss/dss.h #define FLD_MOD(orig, val, start, end) \ start 63 drivers/video/fbdev/omap2/omapfb/dss/dss.h (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) start 22 drivers/video/fbdev/omap2/omapfb/dss/dss_features.c u8 start, end; start 881 drivers/video/fbdev/omap2/omapfb/dss/dss_features.c void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end) start 885 drivers/video/fbdev/omap2/omapfb/dss/dss_features.c *start = omap_current_dss_features->reg_fields[id].start; start 91 drivers/video/fbdev/omap2/omapfb/dss/dss_features.h void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); start 258 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h #define REG_FLD_MOD(base, idx, val, start, end) \ start 260 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h val, start, end)) start 261 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h #define REG_GET(base, idx, start, end) \ start 262 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h FLD_GET(hdmi_read_reg(base, idx), start, end) start 267 drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c wp->phys_base = res->start; start 864 drivers/video/fbdev/omap2/omapfb/dss/venc.c venc.base = devm_ioremap(&pdev->dev, venc_mem->start, start 27 drivers/video/fbdev/omap2/omapfb/dss/video-pll.c #define REG_MOD(reg, val, start, end) \ start 28 drivers/video/fbdev/omap2/omapfb/dss/video-pll.c writel_relaxed(FLD_MOD(readl_relaxed(reg), val, start, end), reg) start 1094 drivers/video/fbdev/omap2/omapfb/omapfb-main.c unsigned long start; start 1100 drivers/video/fbdev/omap2/omapfb/omapfb-main.c start = omapfb_get_region_paddr(ofbi); start 1103 drivers/video/fbdev/omap2/omapfb/omapfb-main.c DBG("user mmap region start %lx, len %d, off %lx\n", start, len, start 1110 drivers/video/fbdev/omap2/omapfb/omapfb-main.c r = vm_iomap_memory(vma, start, len); start 1203 drivers/video/fbdev/omap2/omapfb/omapfb-main.c index = cmap->start; start 1473 drivers/video/fbdev/omap2/omapfb/omapfb-main.c char *p, *start; start 1475 drivers/video/fbdev/omap2/omapfb/omapfb-main.c start = (char *)param; start 1478 drivers/video/fbdev/omap2/omapfb/omapfb-main.c p = start; start 1482 drivers/video/fbdev/omap2/omapfb/omapfb-main.c if (p == start) start 1520 drivers/video/fbdev/omap2/omapfb/omapfb-main.c start = p; start 365 drivers/video/fbdev/omap2/omapfb/vrfb.c ctxs[i].base = mem->start; start 270 drivers/video/fbdev/p9100.c info->fix.smem_start = op->resource[2].start; start 553 drivers/video/fbdev/platinumfb.c (unsigned long long)pinfo->rsrc_reg.start, start 556 drivers/video/fbdev/platinumfb.c (unsigned long long)pinfo->rsrc_fb.start, start 562 drivers/video/fbdev/platinumfb.c if (!request_mem_region(pinfo->rsrc_fb.start, start 571 drivers/video/fbdev/platinumfb.c pinfo->frame_buffer_phys = pinfo->rsrc_fb.start; start 572 drivers/video/fbdev/platinumfb.c pinfo->frame_buffer = ioremap_wt(pinfo->rsrc_fb.start, 0x400000); start 576 drivers/video/fbdev/platinumfb.c pinfo->platinum_regs_phys = pinfo->rsrc_reg.start; start 577 drivers/video/fbdev/platinumfb.c pinfo->platinum_regs = ioremap(pinfo->rsrc_reg.start, 0x1000); start 651 drivers/video/fbdev/platinumfb.c release_mem_region(pinfo->rsrc_fb.start, start 162 drivers/video/fbdev/pmag-aa-fb.c resource_size_t start, 
len; start 180 drivers/video/fbdev/pmag-aa-fb.c start = tdev->resource.start; start 181 drivers/video/fbdev/pmag-aa-fb.c len = tdev->resource.end - start + 1; start 182 drivers/video/fbdev/pmag-aa-fb.c if (!request_mem_region(start, len, dev_name(dev))) { start 190 drivers/video/fbdev/pmag-aa-fb.c info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET; start 201 drivers/video/fbdev/pmag-aa-fb.c info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET; start 241 drivers/video/fbdev/pmag-aa-fb.c release_mem_region(start, len); start 253 drivers/video/fbdev/pmag-aa-fb.c resource_size_t start, len; start 259 drivers/video/fbdev/pmag-aa-fb.c start = tdev->resource.start; start 260 drivers/video/fbdev/pmag-aa-fb.c len = tdev->resource.end - start + 1; start 261 drivers/video/fbdev/pmag-aa-fb.c release_mem_region(start, len); start 147 drivers/video/fbdev/pmag-ba-fb.c resource_size_t start, len; start 172 drivers/video/fbdev/pmag-ba-fb.c start = tdev->resource.start; start 173 drivers/video/fbdev/pmag-ba-fb.c len = tdev->resource.end - start + 1; start 174 drivers/video/fbdev/pmag-ba-fb.c if (!request_mem_region(start, len, dev_name(dev))) { start 182 drivers/video/fbdev/pmag-ba-fb.c info->fix.mmio_start = start; start 192 drivers/video/fbdev/pmag-ba-fb.c info->fix.smem_start = start + PMAG_BA_FBMEM; start 226 drivers/video/fbdev/pmag-ba-fb.c release_mem_region(start, len); start 241 drivers/video/fbdev/pmag-ba-fb.c resource_size_t start, len; start 247 drivers/video/fbdev/pmag-ba-fb.c start = tdev->resource.start; start 248 drivers/video/fbdev/pmag-ba-fb.c len = tdev->resource.end - start + 1; start 249 drivers/video/fbdev/pmag-ba-fb.c release_mem_region(start, len); start 252 drivers/video/fbdev/pmagb-b-fb.c resource_size_t start, len; start 279 drivers/video/fbdev/pmagb-b-fb.c start = tdev->resource.start; start 280 drivers/video/fbdev/pmagb-b-fb.c len = tdev->resource.end - start + 1; start 281 drivers/video/fbdev/pmagb-b-fb.c if (!request_mem_region(start, len, dev_name(dev))) { start 289 drivers/video/fbdev/pmagb-b-fb.c info->fix.mmio_start = start; start 300 drivers/video/fbdev/pmagb-b-fb.c info->fix.smem_start = start + PMAGB_B_FBMEM; start 344 drivers/video/fbdev/pmagb-b-fb.c release_mem_region(start, len); start 359 drivers/video/fbdev/pmagb-b-fb.c resource_size_t start, len; start 365 drivers/video/fbdev/pmagb-b-fb.c start = tdev->resource.start; start 366 drivers/video/fbdev/pmagb-b-fb.c len = tdev->resource.end - start + 1; start 367 drivers/video/fbdev/pmagb-b-fb.c release_mem_region(start, len); start 644 drivers/video/fbdev/pvr2fb.c unsigned long dst, start, end, len; start 665 drivers/video/fbdev/pvr2fb.c start = (unsigned long)page_address(pages[0]); start 670 drivers/video/fbdev/pvr2fb.c if (start + len == end) { start 677 drivers/video/fbdev/pvr2fb.c dma_write(shdma, start, 0, len); start 659 drivers/video/fbdev/pxa168fb.c info->fix.mmio_start = res->start; start 668 drivers/video/fbdev/pxa168fb.c fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start, start 195 drivers/video/fbdev/pxa168fb.h #define CFG_SPI_START(start) (start) start 496 drivers/video/fbdev/pxa3xx-gcu.c priv->resource_mem->start >> PAGE_SHIFT, start 666 drivers/video/fbdev/pxa3xx-gcu.c (void *) r->start, (void *) priv->shared_phys, start 616 drivers/video/fbdev/pxafb.c unsigned long start = ofb->video_mem_phys; start 617 drivers/video/fbdev/pxafb.c setup_frame_dma(ofb->fbi, DMA_OV1, PAL_NONE, start, size); start 657 drivers/video/fbdev/pxafb.c unsigned long start[3] = { ofb->video_mem_phys, 0, 0 }; start 
661 drivers/video/fbdev/pxafb.c setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size); start 669 drivers/video/fbdev/pxafb.c start[1] = start[0] + size; start 670 drivers/video/fbdev/pxafb.c start[2] = start[1] + size / div; start 671 drivers/video/fbdev/pxafb.c setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size); start 672 drivers/video/fbdev/pxafb.c setup_frame_dma(ofb->fbi, DMA_OV2_Cb, -1, start[1], size / div); start 673 drivers/video/fbdev/pxafb.c setup_frame_dma(ofb->fbi, DMA_OV2_Cr, -1, start[2], size / div); start 1067 drivers/video/fbdev/pxafb.c unsigned long start, size_t size) start 1078 drivers/video/fbdev/pxafb.c dma_desc->fsadr = start; start 1834 drivers/video/fbdev/riva/riva_hw.c unsigned start start 1837 drivers/video/fbdev/riva/riva_hw.c NV_WR32(chip->PCRTC, 0x800, start); start 1843 drivers/video/fbdev/riva/riva_hw.c unsigned start start 1846 drivers/video/fbdev/riva/riva_hw.c int offset = start >> 2; start 1847 drivers/video/fbdev/riva/riva_hw.c int pan = (start & 3) << 1; start 372 drivers/video/fbdev/s1d13xxxfb.c u32 start; start 380 drivers/video/fbdev/s1d13xxxfb.c start = (info->fix.line_length >> 1) * var->yoffset; start 384 drivers/video/fbdev/s1d13xxxfb.c s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START0, (start & 0xff)); start 385 drivers/video/fbdev/s1d13xxxfb.c s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START1, ((start >> 8) & 0xff)); start 386 drivers/video/fbdev/s1d13xxxfb.c s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START2, ((start >> 16) & 0x0f)); start 389 drivers/video/fbdev/s1d13xxxfb.c s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START0, (start & 0xff)); start 390 drivers/video/fbdev/s1d13xxxfb.c s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START1, ((start >> 8) & 0xff)); start 391 drivers/video/fbdev/s1d13xxxfb.c s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START2, ((start >> 16) & 0x0f)); start 748 drivers/video/fbdev/s1d13xxxfb.c release_mem_region(pdev->resource[0].start, start 749 drivers/video/fbdev/s1d13xxxfb.c pdev->resource[0].end - pdev->resource[0].start +1); start 750 drivers/video/fbdev/s1d13xxxfb.c release_mem_region(pdev->resource[1].start, start 751 drivers/video/fbdev/s1d13xxxfb.c pdev->resource[1].end - pdev->resource[1].start +1); start 790 drivers/video/fbdev/s1d13xxxfb.c if (!request_mem_region(pdev->resource[0].start, start 791 drivers/video/fbdev/s1d13xxxfb.c pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) { start 797 drivers/video/fbdev/s1d13xxxfb.c if (!request_mem_region(pdev->resource[1].start, start 798 drivers/video/fbdev/s1d13xxxfb.c pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) { start 812 drivers/video/fbdev/s1d13xxxfb.c default_par->regs = ioremap_nocache(pdev->resource[1].start, start 813 drivers/video/fbdev/s1d13xxxfb.c pdev->resource[1].end - pdev->resource[1].start +1); start 821 drivers/video/fbdev/s1d13xxxfb.c info->screen_base = ioremap_nocache(pdev->resource[0].start, start 822 drivers/video/fbdev/s1d13xxxfb.c pdev->resource[0].end - pdev->resource[0].start +1); start 859 drivers/video/fbdev/s1d13xxxfb.c info->fix.mmio_start = pdev->resource[1].start; start 860 drivers/video/fbdev/s1d13xxxfb.c info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1; start 861 drivers/video/fbdev/s1d13xxxfb.c info->fix.smem_start = pdev->resource[0].start; start 862 drivers/video/fbdev/s1d13xxxfb.c info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1; start 1427 drivers/video/fbdev/s3c-fb.c sfb->irq_no = res->start; start 875 
drivers/video/fbdev/s3c2410fb.c info->mem = request_mem_region(res->start, size, pdev->name); start 882 drivers/video/fbdev/s3c2410fb.c info->io = ioremap(res->start, size); start 1006 drivers/video/fbdev/s3c2410fb.c release_mem_region(res->start, size); start 1051 drivers/video/fbdev/s3c2410fb.c release_mem_region(info->mem->start, resource_size(info->mem)); start 1165 drivers/video/fbdev/s3fb.c bus_reg.start = 0; start 1172 drivers/video/fbdev/s3fb.c par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start; start 81 drivers/video/fbdev/savage/savagefb_driver.c static void vgaHWSeqReset(struct savagefb_par *par, int start) start 83 drivers/video/fbdev/savage/savagefb_driver.c if (start) start 151 drivers/video/fbdev/sbuslib.c cmap.start = index + i; start 458 drivers/video/fbdev/sh7760fb.c par->ioarea = request_mem_region(res->start, start 466 drivers/video/fbdev/sh7760fb.c par->base = ioremap_nocache(res->start, resource_size(res)); start 551 drivers/video/fbdev/sh7760fb.c release_mem_region(res->start, resource_size(res)); start 569 drivers/video/fbdev/sh7760fb.c release_mem_region(par->ioarea->start, resource_size(par->ioarea)); start 700 drivers/video/fbdev/sh_mobile_lcdcfb.c int start) start 706 drivers/video/fbdev/sh_mobile_lcdcfb.c if (start) start 717 drivers/video/fbdev/sh_mobile_lcdcfb.c if (start && tmp == LDPMR_LPS) start 719 drivers/video/fbdev/sh_mobile_lcdcfb.c if (!start && tmp == 0) start 724 drivers/video/fbdev/sh_mobile_lcdcfb.c if (!start) start 2591 drivers/video/fbdev/sh_mobile_lcdcfb.c priv->base = ioremap_nocache(res->start, resource_size(res)); start 436 drivers/video/fbdev/simplefb.c info->fix.smem_start = mem->start; start 488 drivers/video/fbdev/sm501fb.c info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; start 1223 drivers/video/fbdev/sm501fb.c unsigned int start, unsigned int len) start 1229 drivers/video/fbdev/sm501fb.c for (reg = start; reg < (len + start); reg += 4) start 1546 drivers/video/fbdev/sm501fb.c info->regs_res = request_mem_region(res->start, start 1556 drivers/video/fbdev/sm501fb.c info->regs = ioremap(res->start, resource_size(res)); start 1572 drivers/video/fbdev/sm501fb.c info->regs2d_res = request_mem_region(res->start, start 1582 drivers/video/fbdev/sm501fb.c info->regs2d = ioremap(res->start, resource_size(res)); start 1597 drivers/video/fbdev/sm501fb.c info->fbmem_res = request_mem_region(res->start, start 1606 drivers/video/fbdev/sm501fb.c info->fbmem = ioremap(res->start, resource_size(res)); start 1635 drivers/video/fbdev/sm501fb.c release_mem_region(info->fbmem_res->start, start 1642 drivers/video/fbdev/sm501fb.c release_mem_region(info->regs2d_res->start, start 1649 drivers/video/fbdev/sm501fb.c release_mem_region(info->regs_res->start, start 1662 drivers/video/fbdev/sm501fb.c release_mem_region(info->fbmem_res->start, start 1666 drivers/video/fbdev/sm501fb.c release_mem_region(info->regs2d_res->start, start 1670 drivers/video/fbdev/sm501fb.c release_mem_region(info->regs_res->start, start 777 drivers/video/fbdev/smscufx.c unsigned long start = vma->vm_start; start 796 drivers/video/fbdev/smscufx.c if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) start 799 drivers/video/fbdev/smscufx.c start += PAGE_SIZE; start 907 drivers/video/fbdev/smscufx.c int start = max((int)(offset / info->fix.line_length), 0); start 911 drivers/video/fbdev/smscufx.c ufx_handle_damage(dev, 0, start, info->var.xres, lines); start 132 drivers/video/fbdev/sunxvr1000.c gp->fb_base_phys = op->resource[6].start; start 420 
drivers/video/fbdev/tcx.c info->fix.smem_start = op->resource[0].start; start 439 drivers/video/fbdev/tcx.c par->mmap_map[i].poff = op->resource[j].start; start 1432 drivers/video/fbdev/tgafb.c bar0_start = to_tc_dev(dev)->resource.start; start 1550 drivers/video/fbdev/tgafb.c bar0_start = to_tc_dev(dev)->resource.start; start 272 drivers/video/fbdev/tmiofb.c base = nlcr->start; start 290 drivers/video/fbdev/tmiofb.c base = vram->start + info->screen_size; start 715 drivers/video/fbdev/tmiofb.c info->fix.smem_start = vram->start; start 719 drivers/video/fbdev/tmiofb.c info->fix.mmio_start = lcr->start; start 725 drivers/video/fbdev/tmiofb.c par->ccr = ioremap(ccr->start, resource_size(ccr)); start 324 drivers/video/fbdev/udlfb.c unsigned long start = vma->vm_start; start 343 drivers/video/fbdev/udlfb.c if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) start 346 drivers/video/fbdev/udlfb.c start += PAGE_SIZE; start 371 drivers/video/fbdev/udlfb.c int start = width; start 376 drivers/video/fbdev/udlfb.c start = j; start 388 drivers/video/fbdev/udlfb.c identical = start + (width - end); start 389 drivers/video/fbdev/udlfb.c *bfront = (u8 *) &front[start]; start 390 drivers/video/fbdev/udlfb.c *width_bytes = (end - start) * sizeof(unsigned long); start 729 drivers/video/fbdev/udlfb.c int start = max((int)(offset / info->fix.line_length), 0); start 733 drivers/video/fbdev/udlfb.c dlfb_handle_damage(dlfb, 0, start, info->var.xres, start 923 drivers/video/fbdev/uvesafb.c int start, struct fb_info *info) start 936 drivers/video/fbdev/uvesafb.c if (start + count > 256) start 944 drivers/video/fbdev/uvesafb.c outb_p(start + i, dac_reg); start 958 drivers/video/fbdev/uvesafb.c "d" (start), /* EDX */ start 973 drivers/video/fbdev/uvesafb.c task->t.regs.edx = start; start 1045 drivers/video/fbdev/uvesafb.c if (cmap->start + cmap->len > info->cmap.start + start 1046 drivers/video/fbdev/uvesafb.c info->cmap.len || cmap->start < info->cmap.start) start 1060 drivers/video/fbdev/uvesafb.c err = uvesafb_setpalette(entries, cmap->len, cmap->start, info); start 1069 drivers/video/fbdev/uvesafb.c err |= uvesafb_setcolreg(cmap->start + i, cmap->red[i], start 342 drivers/video/fbdev/valkyriefb.c frame_buffer_phys = r.start; start 343 drivers/video/fbdev/valkyriefb.c cmap_regs_phys = r.start + 0x304000; start 55 drivers/video/fbdev/via/via_aux.h static inline bool via_aux_read(struct via_aux_drv *drv, u8 start, u8 *buf, start 59 drivers/video/fbdev/via/via_aux.h {.addr = drv->addr, .flags = 0, .len = 1, .buf = &start}, start 321 drivers/video/fbdev/vt8500lcdfb.c res = request_mem_region(res->start, resource_size(res), "vt8500lcd"); start 327 drivers/video/fbdev/vt8500lcdfb.c fbi->regbase = ioremap(res->start, resource_size(res)); start 438 drivers/video/fbdev/vt8500lcdfb.c release_mem_region(res->start, resource_size(res)); start 464 drivers/video/fbdev/vt8500lcdfb.c release_mem_region(res->start, resource_size(res)); start 715 drivers/video/fbdev/vt8623fb.c bus_reg.start = 0; start 722 drivers/video/fbdev/vt8623fb.c par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start; start 653 drivers/video/fbdev/w100fb.c remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN); start 658 drivers/video/fbdev/w100fb.c remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN); start 674 drivers/video/fbdev/w100fb.c printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE); start 677 drivers/video/fbdev/w100fb.c remapped_fbuf = 
ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE); start 721 drivers/video/fbdev/w100fb.c info->fix.smem_start = mem->start+W100_FB_BASE; start 722 drivers/video/fbdev/w100fb.c info->fix.mmio_start = mem->start+W100_REG_BASE; start 135 drivers/video/fbdev/wmt_ge_rops.c regbase = ioremap(res->start, resource_size(res)); start 279 drivers/video/fbdev/xilinxfb.c drvdata->regs_phys = res->start; start 442 drivers/video/fbdev/xilinxfb.c int start; start 444 drivers/video/fbdev/xilinxfb.c start = dcr_resource_start(pdev->dev.of_node, 0); start 446 drivers/video/fbdev/xilinxfb.c drvdata->dcr_host = dcr_map(pdev->dev.of_node, start, drvdata->dcr_len); start 542 drivers/virtio/virtio_mmio.c if (!devm_request_mem_region(&pdev->dev, mem->start, start 557 drivers/virtio/virtio_mmio.c vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); start 663 drivers/virtio/virtio_mmio.c resources[0].start = base; start 667 drivers/virtio/virtio_mmio.c resources[1].start = resources[1].end = irq; start 680 drivers/virtio/virtio_mmio.c (unsigned long long)resources[0].start, start 682 drivers/virtio/virtio_mmio.c (int)resources[1].start); start 698 drivers/virtio/virtio_mmio.c pdev->resource[0].end - pdev->resource[0].start + 1ULL, start 699 drivers/virtio/virtio_mmio.c (unsigned long long)pdev->resource[0].start, start 700 drivers/virtio/virtio_mmio.c (unsigned long long)pdev->resource[1].start, start 68 drivers/virtio/virtio_pci_modern.c u32 start, u32 size, start 83 drivers/virtio/virtio_pci_modern.c if (length <= start) { start 86 drivers/virtio/virtio_pci_modern.c length, start); start 90 drivers/virtio/virtio_pci_modern.c if (length - start < minlen) { start 97 drivers/virtio/virtio_pci_modern.c length -= start; start 99 drivers/virtio/virtio_pci_modern.c if (start + offset < offset) { start 102 drivers/virtio/virtio_pci_modern.c start, offset); start 106 drivers/virtio/virtio_pci_modern.c offset += start; start 687 drivers/vlynq/vlynq.c dev->regs_start = regs_res->start; start 689 drivers/vlynq/vlynq.c dev->mem_start = mem_res->start; start 693 drivers/vlynq/vlynq.c if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { start 700 drivers/vlynq/vlynq.c dev->local = ioremap(regs_res->start, len); start 712 drivers/vlynq/vlynq.c dev->irq_start = irq_res->start; start 741 drivers/vlynq/vlynq.c release_mem_region(regs_res->start, len); start 517 drivers/vme/bridges/vme_ca91cx42.c image->bus_resource.start); start 542 drivers/vme/bridges/vme_ca91cx42.c image->bus_resource.start = 0; start 553 drivers/vme/bridges/vme_ca91cx42.c (unsigned long)image->bus_resource.start); start 558 drivers/vme/bridges/vme_ca91cx42.c image->bus_resource.start, size); start 640 drivers/vme/bridges/vme_ca91cx42.c pci_base = (unsigned long long)image->bus_resource.start; start 423 drivers/vme/bridges/vme_fake.c unsigned long long start, end, offset; start 427 drivers/vme/bridges/vme_fake.c start = bridge->slaves[i].vme_base; start 436 drivers/vme/bridges/vme_fake.c if ((addr >= start) && (addr < end)) { start 456 drivers/vme/bridges/vme_fake.c unsigned long long start, end, offset; start 466 drivers/vme/bridges/vme_fake.c start = bridge->slaves[i].vme_base; start 469 drivers/vme/bridges/vme_fake.c if ((addr >= start) && ((addr + 1) < end)) { start 489 drivers/vme/bridges/vme_fake.c unsigned long long start, end, offset; start 499 drivers/vme/bridges/vme_fake.c start = bridge->slaves[i].vme_base; start 502 drivers/vme/bridges/vme_fake.c if ((addr >= start) && ((addr + 3) < end)) { start 620 
drivers/vme/bridges/vme_fake.c unsigned long long start, end, offset; start 630 drivers/vme/bridges/vme_fake.c start = bridge->slaves[i].vme_base; start 633 drivers/vme/bridges/vme_fake.c if ((addr >= start) && (addr < end)) { start 651 drivers/vme/bridges/vme_fake.c unsigned long long start, end, offset; start 661 drivers/vme/bridges/vme_fake.c start = bridge->slaves[i].vme_base; start 664 drivers/vme/bridges/vme_fake.c if ((addr >= start) && ((addr + 1) < end)) { start 682 drivers/vme/bridges/vme_fake.c unsigned long long start, end, offset; start 692 drivers/vme/bridges/vme_fake.c start = bridge->slaves[i].vme_base; start 695 drivers/vme/bridges/vme_fake.c if ((addr >= start) && ((addr + 3) < end)) { start 729 drivers/vme/bridges/vme_tsi148.c image->bus_resource.start); start 758 drivers/vme/bridges/vme_tsi148.c image->bus_resource.start = 0; start 769 drivers/vme/bridges/vme_tsi148.c (unsigned long)image->bus_resource.start); start 774 drivers/vme/bridges/vme_tsi148.c image->bus_resource.start, size); start 865 drivers/vme/bridges/vme_tsi148.c pci_base = region.start; start 810 drivers/vme/vme.c phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT); start 1266 drivers/vme/vme.c (address >= handler->start) && start 1294 drivers/vme/vme.c handler->start = address; start 83 drivers/vme/vme_bridge.h unsigned long long start; /* Beginning of error window */ start 526 drivers/w1/masters/ds1wm.c ds1wm_data->map = devm_ioremap(&pdev->dev, res->start, start 562 drivers/w1/masters/ds1wm.c ds1wm_data->irq = res->start; start 240 drivers/watchdog/armada_37xx_wdt.c .start = armada_37xx_wdt_start, start 276 drivers/watchdog/armada_37xx_wdt.c dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); start 190 drivers/watchdog/asm9260_wdt.c .start = asm9260_wdt_enable, start 226 drivers/watchdog/aspeed_wdt.c .start = aspeed_wdt_start, start 260 drivers/watchdog/at91sam9_wdt.c .start = at91_wdt_start, start 109 drivers/watchdog/atlas7_wdt.c .start = atlas7_wdt_enable, start 127 drivers/watchdog/bcm2835_wdt.c .start = bcm2835_wdt_start, start 99 drivers/watchdog/bcm47xx_wdt.c .start = bcm47xx_wdt_hard_start, start 170 drivers/watchdog/bcm47xx_wdt.c .start = bcm47xx_wdt_soft_start, start 248 drivers/watchdog/bcm63xx_wdt.c bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start, start 104 drivers/watchdog/bcm7038_wdt.c .start = bcm7038_wdt_start, start 248 drivers/watchdog/bcm_kona_wdt.c .start = bcm_kona_wdt_start, start 219 drivers/watchdog/bd70528_wdt.c .start = bd70528_wdt_start, start 198 drivers/watchdog/booke_wdt.c .start = booke_wdt_start, start 271 drivers/watchdog/cadence_wdt.c .start = cdns_wdt_start, start 226 drivers/watchdog/coh901327_wdt.c .start = coh901327_start, start 144 drivers/watchdog/da9052_wdt.c .start = da9052_wdt_start, start 114 drivers/watchdog/da9055_wdt.c .start = da9055_wdt_start, start 165 drivers/watchdog/da9062_wdt.c .start = da9062_wdt_start, start 182 drivers/watchdog/da9063_wdt.c .start = da9063_wdt_start, start 185 drivers/watchdog/davinci_wdt.c .start = davinci_wdt_start, start 208 drivers/watchdog/diag288_wdt.c .start = wdt_start, start 98 drivers/watchdog/digicolor_wdt.c .start = dc_wdt_start, start 192 drivers/watchdog/dw_wdt.c .start = dw_wdt_start, start 80 drivers/watchdog/ebc-c384_wdt.c .start = ebc_c384_wdt_start, start 85 drivers/watchdog/ep93xx_wdt.c .start = ep93xx_wdt_start, start 106 drivers/watchdog/ftwdt010_wdt.c .start = ftwdt010_wdt_start, start 101 drivers/watchdog/gpio_wdt.c .start = gpio_wdt_start, start 214 
drivers/watchdog/hpwdt.c .start = hpwdt_start, start 198 drivers/watchdog/i6300esb.c .start = esb_timer_start, start 82 drivers/watchdog/iTCO_vendor_support.c val32 = inl(smires->start); start 84 drivers/watchdog/iTCO_vendor_support.c outl(val32, smires->start); /* Needed to activate watchdog */ start 92 drivers/watchdog/iTCO_vendor_support.c val32 = inl(smires->start); start 94 drivers/watchdog/iTCO_vendor_support.c outl(val32, smires->start); /* Needed to deactivate watchdog */ start 132 drivers/watchdog/iTCO_vendor_support.c val32 = inl(smires->start); start 136 drivers/watchdog/iTCO_vendor_support.c outl(val32, smires->start); start 143 drivers/watchdog/iTCO_vendor_support.c val32 = inl(smires->start); start 147 drivers/watchdog/iTCO_vendor_support.c outl(val32, smires->start); start 72 drivers/watchdog/iTCO_wdt.c #define TCOBASE(p) ((p)->tco_res->start) start 74 drivers/watchdog/iTCO_wdt.c #define SMI_EN(p) ((p)->smi_res->start) start 430 drivers/watchdog/iTCO_wdt.c .start = iTCO_wdt_start, start 468 drivers/watchdog/iTCO_wdt.c if (!devm_request_region(dev, p->smi_res->start, start 517 drivers/watchdog/iTCO_wdt.c if (!devm_request_region(dev, p->tco_res->start, start 166 drivers/watchdog/ie6xx_wdt.c .start = ie6xx_wdt_start, start 234 drivers/watchdog/ie6xx_wdt.c if (!request_region(res->start, resource_size(res), pdev->name)) { start 236 drivers/watchdog/ie6xx_wdt.c (u64)res->start); start 240 drivers/watchdog/ie6xx_wdt.c ie6xx_wdt_data.sch_wdtba = res->start; start 264 drivers/watchdog/ie6xx_wdt.c release_region(res->start, resource_size(res)); start 277 drivers/watchdog/ie6xx_wdt.c release_region(res->start, resource_size(res)); start 171 drivers/watchdog/imgpdc_wdt.c .start = pdc_wdt_start, start 234 drivers/watchdog/imx2_wdt.c .start = imx2_wdt_start, start 126 drivers/watchdog/imx7ulp_wdt.c .start = imx7ulp_wdt_start, start 145 drivers/watchdog/imx_sc_wdt.c .start = imx_sc_wdt_start, start 103 drivers/watchdog/intel-mid_wdt.c .start = wdt_start, start 249 drivers/watchdog/it87_wdt.c .start = wdt_start, start 144 drivers/watchdog/jz4740_wdt.c .start = jz4740_wdt_start, start 429 drivers/watchdog/kempld_wdt.c .start = kempld_wdt_start, start 152 drivers/watchdog/lantiq_wdt.c .start = ltq_wdt_start, start 76 drivers/watchdog/loongson1_wdt.c .start = ls1x_wdt_start, start 192 drivers/watchdog/lpc18xx_wdt.c .start = lpc18xx_wdt_start, start 150 drivers/watchdog/max63xx_wdt.c .start = max63xx_wdt_start, start 104 drivers/watchdog/max77620_wdt.c .start = max77620_wdt_start, start 307 drivers/watchdog/mei_wdt.c .start = mei_wdt_ops_start, start 115 drivers/watchdog/mena21_wdt.c .start = a21_wdt_start, start 108 drivers/watchdog/menf21bmc_wdt.c .start = menf21bmc_wdt_start, start 95 drivers/watchdog/menz69_wdt.c .start = men_z069_wdt_start, start 123 drivers/watchdog/menz69_wdt.c drv->base = devm_ioremap(&dev->dev, mem->start, resource_size(mem)); start 97 drivers/watchdog/meson_gxbb_wdt.c .start = meson_gxbb_wdt_start, start 145 drivers/watchdog/meson_wdt.c .start = meson_wdt_start, start 159 drivers/watchdog/mlx_wdt.c .start = mlxreg_wdt_start, start 167 drivers/watchdog/mlx_wdt.c .start = mlxreg_wdt_start, start 84 drivers/watchdog/moxart_wdt.c .start = moxart_wdt_start, start 123 drivers/watchdog/mpc8xxx_wdt.c .start = mpc8xxx_wdt_start, start 161 drivers/watchdog/mpc8xxx_wdt.c u32 __iomem *rsr = ioremap(res->start, resource_size(res)); start 118 drivers/watchdog/mt7621_wdt.c .start = mt7621_wdt_start, start 147 drivers/watchdog/mtk_wdt.c .start = mtk_wdt_start, start 277 
drivers/watchdog/mv64x60_wdt.c mv64x60_wdt_regs = devm_ioremap(&dev->dev, r->start, resource_size(r)); start 173 drivers/watchdog/ni903x_wdt.c .start = ni903x_wdd_start, start 161 drivers/watchdog/nic7018_wdt.c .start = nic7018_start, start 188 drivers/watchdog/nic7018_wdt.c if (!devm_request_region(dev, io_rc->start, resource_size(io_rc), start 194 drivers/watchdog/nic7018_wdt.c wdt->io_base = io_rc->start; start 173 drivers/watchdog/npcm_wdt.c .start = npcm_wdt_start, start 503 drivers/watchdog/octeon-wdt-main.c .start = octeon_wdt_start, start 124 drivers/watchdog/of_xilinx_wdt.c .start = xilinx_wdt_start, start 222 drivers/watchdog/omap_wdt.c .start = omap_wdt_start, start 67 drivers/watchdog/orion_wdt.c int (*start)(struct watchdog_device *); start 275 drivers/watchdog/orion_wdt.c return dev->data->start(wdt_dev); start 376 drivers/watchdog/orion_wdt.c .start = orion_wdt_start, start 412 drivers/watchdog/orion_wdt.c return devm_ioremap(&pdev->dev, res->start, start 427 drivers/watchdog/orion_wdt.c .start = orion_start, start 437 drivers/watchdog/orion_wdt.c .start = armada370_start, start 447 drivers/watchdog/orion_wdt.c .start = armada370_start, start 458 drivers/watchdog/orion_wdt.c .start = armada375_start, start 469 drivers/watchdog/orion_wdt.c .start = armada375_start, start 507 drivers/watchdog/orion_wdt.c dev->reg = devm_ioremap(&pdev->dev, res->start, start 515 drivers/watchdog/orion_wdt.c dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start & start 539 drivers/watchdog/orion_wdt.c dev->rstout_mask = devm_ioremap(&pdev->dev, res->start, start 151 drivers/watchdog/pic32-dmt.c .start = pic32_dmt_start, start 143 drivers/watchdog/pic32-wdt.c .start = pic32_wdt_start, start 126 drivers/watchdog/pm8916_wdt.c .start = pm8916_wdt_start, start 168 drivers/watchdog/pnx4008_wdt.c .start = pnx4008_wdt_start, start 151 drivers/watchdog/qcom-wdt.c .start = qcom_wdt_start, start 211 drivers/watchdog/qcom-wdt.c res->start += percpu_offset; start 220 drivers/watchdog/rave-sp-wdt.c .start = rave_sp_wdt_start, start 269 drivers/watchdog/rc32434_wdt.c wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); start 232 drivers/watchdog/rdc321x_wdt.c rdc321x_wdt_device.base_reg = r->start; start 146 drivers/watchdog/renesas_wdt.c .start = rwdt_start, start 95 drivers/watchdog/retu_wdt.c .start = retu_wdt_start, start 135 drivers/watchdog/rn5t618_wdt.c .start = rn5t618_wdt_start, start 127 drivers/watchdog/rt2880_wdt.c .start = rt288x_wdt_start, start 81 drivers/watchdog/rtd119x_wdt.c .start = rtd119x_wdt_start, start 160 drivers/watchdog/rza_wdt.c .start = rza_wdt_start, start 384 drivers/watchdog/s3c2410_wdt.c .start = s3c2410wdt_start, start 595 drivers/watchdog/s3c2410_wdt.c ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0, start 137 drivers/watchdog/sama5d4_wdt.c .start = sama5d4_wdt_start, start 212 drivers/watchdog/sbsa_gwdt.c .start = sbsa_gwdt_start, start 205 drivers/watchdog/shwdt.c .start = sh_wdt_start, start 131 drivers/watchdog/sirfsoc_wdt.c .start = sirfsoc_wdt_enable, start 118 drivers/watchdog/softdog.c .start = softdog_ping, start 367 drivers/watchdog/sp5100_tco.c .start = tco_timer_start, start 222 drivers/watchdog/sp805_wdt.c .start = wdt_enable, start 245 drivers/watchdog/sprd_wdt.c .start = sprd_wdt_start, start 134 drivers/watchdog/st_lpc_wdt.c .start = st_wdog_start, start 214 drivers/watchdog/stm32_iwdg.c .start = stm32_iwdg_start, start 58 drivers/watchdog/stmp3xxx_rtc_wdt.c .start = wdt_start, start 76 drivers/watchdog/stpmic1_wdt.c .start = 
pmic_wdt_start, start 78 drivers/watchdog/sun4v_wdt.c .start = sun4v_wdt_ping, start 198 drivers/watchdog/sunxi_wdt.c .start = sunxi_wdt_start, start 104 drivers/watchdog/tangox_wdt.c .start = tangox_wdt_start, start 175 drivers/watchdog/tegra_wdt.c .start = tegra_wdt_start, start 67 drivers/watchdog/tqmx86_wdt.c .start = tqmx86_wdt_start, start 86 drivers/watchdog/tqmx86_wdt.c priv->io_base = devm_ioport_map(dev, res->start, resource_size(res)); start 99 drivers/watchdog/ts4800_wdt.c .start = ts4800_wdt_start, start 117 drivers/watchdog/ts72xx_wdt.c .start = ts72xx_wdt_start, start 53 drivers/watchdog/twl4030_wdt.c .start = twl4030_wdt_start, start 90 drivers/watchdog/txx9wdt.c .start = txx9wdt_start, start 176 drivers/watchdog/uniphier_wdt.c .start = uniphier_watchdog_start, start 73 drivers/watchdog/ux500_wdt.c .start = ux500_wdt_start, start 145 drivers/watchdog/via_wdt.c .start = wdt_start, start 181 drivers/watchdog/via_wdt.c pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start); start 313 drivers/watchdog/w83627hf_wdt.c .start = wdt_start, start 212 drivers/watchdog/watchdog_core.c if (!wdd->ops->start || (!wdd->ops->stop && !wdd->max_hw_heartbeat_ms)) start 188 drivers/watchdog/watchdog_dev.c err = wdd->ops->start(wdd); /* restart watchdog */ start 281 drivers/watchdog/watchdog_dev.c err = wdd->ops->start(wdd); start 306 drivers/watchdog/wdat_wdt.c .start = wdat_wdt_start, start 362 drivers/watchdog/wdat_wdt.c reg = devm_ioport_map(dev, res->start, 1); start 398 drivers/watchdog/wdat_wdt.c r.start = gas->address; start 399 drivers/watchdog/wdat_wdt.c r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; start 414 drivers/watchdog/wdat_wdt.c instr->reg = regs[j] + r.start - res->start; start 175 drivers/watchdog/wm831x_wdt.c .start = wm831x_wdt_start, start 126 drivers/watchdog/wm8350_wdt.c .start = wm8350_wdt_start, start 107 drivers/watchdog/xen_wdt.c .start = xen_wdt_start, start 430 drivers/watchdog/ziirave_wdt.c .start = ziirave_wdt_start, start 158 drivers/watchdog/zx2967_wdt.c .start = zx2967_wdt_start, start 30 drivers/xen/arm-device.c xrp.gpfn = XEN_PFN_DOWN(r->start) + j; start 79 drivers/xen/arm-device.c gpfns[j] = XEN_PFN_DOWN(r->start) + j; start 80 drivers/xen/arm-device.c idxs[j] = XEN_PFN_DOWN(r->start) + j; start 273 drivers/xen/balloon.c unsigned long pfn = res->start >> PAGE_SHIFT; start 310 drivers/xen/balloon.c nid = memory_add_physaddr_to_nid(resource->start); start 331 drivers/xen/balloon.c pfn = PFN_DOWN(resource->start); start 479 drivers/xen/gntdev.c unsigned long start, unsigned long end) start 485 drivers/xen/gntdev.c if (map->vma->vm_end <= start) start 492 drivers/xen/gntdev.c unsigned long start, unsigned long end, start 498 drivers/xen/gntdev.c if (!in_range(map, start, end)) start 504 drivers/xen/gntdev.c mstart = max(start, map->vma->vm_start); start 509 drivers/xen/gntdev.c start, end, mstart, mend); start 531 drivers/xen/gntdev.c ret = unmap_if_in_range(map, range->start, range->end, start 537 drivers/xen/gntdev.c ret = unmap_if_in_range(map, range->start, range->end, start 236 drivers/xen/xen-pciback/conf_space_header.c bar->val = res[pos - 1].start >> 32; start 247 drivers/xen/xen-pciback/conf_space_header.c bar->val = res[pos].start | start 256 drivers/xen/xenbus/xenbus_probe_frontend.c static bool wait_loop(unsigned long start, unsigned int max_delay, start 259 drivers/xen/xenbus/xenbus_probe_frontend.c if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { start 290 drivers/xen/xenbus/xenbus_probe_frontend.c unsigned 
long start = jiffies; start 298 drivers/xen/xenbus/xenbus_probe_frontend.c if (wait_loop(start, 30, &seconds_waited)) start 303 drivers/xen/xenbus/xenbus_probe_frontend.c if (wait_loop(start, 270, &seconds_waited)) start 105 drivers/xen/xenfs/xensyms.c .start = xensyms_start, start 93 drivers/zorro/proc.c .start = zorro_seq_start, start 88 drivers/zorro/zorro.c static void __init mark_region(unsigned long start, unsigned long end, start 92 drivers/zorro/zorro.c start += Z2RAM_CHUNKMASK; start 95 drivers/zorro/zorro.c start &= ~Z2RAM_CHUNKMASK; start 98 drivers/zorro/zorro.c if (end <= Z2RAM_START || start >= Z2RAM_END) start 100 drivers/zorro/zorro.c start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START; start 102 drivers/zorro/zorro.c while (start < end) { start 103 drivers/zorro/zorro.c u32 chunk = start>>Z2RAM_CHUNKSHIFT; start 109 drivers/zorro/zorro.c start += Z2RAM_CHUNKSIZE; start 122 drivers/zorro/zorro.c if (zorro_resource_start(z) >= r->start && start 179 drivers/zorro/zorro.c z->resource.start = zi->boardaddr; start 63 fs/9p/v9fs_vfs.h int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, start 269 fs/9p/vfs_addr.c start: start 285 fs/9p/vfs_addr.c goto start; start 168 fs/9p/vfs_file.c flock.start = fl->fl_start; start 261 fs/9p/vfs_file.c glock.start = fl->fl_start; start 285 fs/9p/vfs_file.c fl->fl_start = glock.start; start 289 fs/9p/vfs_file.c fl->fl_end = glock.start + glock.length - 1; start 445 fs/9p/vfs_file.c static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, start 453 fs/9p/vfs_file.c retval = file_write_and_wait_range(filp, start, end); start 469 fs/9p/vfs_file.c int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, start 476 fs/9p/vfs_file.c retval = file_write_and_wait_range(filp, start, end); start 72 fs/adfs/map.c unsigned int start = dm->dm_startbit; start 77 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask); start 78 fs/adfs/map.c mapptr = start + idlen; start 99 fs/adfs/map.c start = mapptr; start 105 fs/adfs/map.c frag, start, mapptr); start 110 fs/adfs/map.c int length = mapptr - start; start 116 fs/adfs/map.c return start + *offset; start 133 fs/adfs/map.c unsigned int start = 8, mapptr; start 140 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask); start 150 fs/adfs/map.c start += frag; start 155 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask); start 156 fs/adfs/map.c mapptr = start + idlen; start 174 fs/adfs/map.c total += mapptr - start; start 953 fs/affs/file.c int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) start 958 fs/affs/file.c err = file_write_and_wait_range(filp, start, end); start 76 fs/afs/proc.c .start = afs_proc_cells_start, start 253 fs/afs/proc.c .start = afs_proc_cell_volumes_start, start 364 fs/afs/proc.c .start = afs_proc_cell_vlservers_start, start 417 fs/afs/proc.c .start = afs_proc_servers_start, start 472 fs/afs/proc.c .start = afs_proc_sysname_start, start 164 fs/afs/rotate.c goto start; start 339 fs/afs/rotate.c start: start 107 fs/afs/vl_rotate.c goto start; start 164 fs/afs/vl_rotate.c start: start 460 fs/afs/write.c pgoff_t start, first, last; start 474 fs/afs/write.c start = primary_page->index; start 486 fs/afs/write.c if (start >= final_page || start 490 fs/afs/write.c start++; start 492 fs/afs/write.c _debug("more %lx [%lx]", start, count); start 493 fs/afs/write.c n = final_page - start + 1; start 496 fs/afs/write.c n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); start 500 fs/afs/write.c if 
(pages[0]->index != start) { start 548 fs/afs/write.c start += loop; start 549 fs/afs/write.c } while (start <= final_page && count < 65536); start 700 fs/afs/write.c pgoff_t start, end, next; start 706 fs/afs/write.c start = mapping->writeback_index; start 708 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, start, end, &next); start 709 fs/afs/write.c if (start > 0 && wbc->nr_to_write > 0 && ret == 0) start 710 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, 0, start, start 719 fs/afs/write.c start = wbc->range_start >> PAGE_SHIFT; start 721 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, start, end, &next); start 760 fs/afs/write.c int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 769 fs/afs/write.c return file_write_and_wait_range(file, start, end); start 123 fs/bad_inode.c struct fiemap_extent_info *fieinfo, u64 start, start 126 fs/befs/befs.h iaddr->start); start 135 fs/befs/befs.h iaddr.start = start 97 fs/befs/befs_fs_types.h fs16 start; start 103 fs/befs/befs_fs_types.h u16 start; start 266 fs/befs/datastream.c run->start = array[i].start + offset; start 346 fs/befs/datastream.c run->start = start 347 fs/befs/datastream.c fs16_to_cpu(sb, array[j].start) + offset; start 517 fs/befs/datastream.c run->start += offset; start 84 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, tmp_run.len); start 97 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, tmp_run.len); start 101 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, tmp_run.len); start 115 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, start 126 fs/befs/debug.c tmp_run.start, tmp_run.len); start 136 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, start 186 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, tmp_run.len); start 195 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, tmp_run.len); start 199 fs/befs/debug.c tmp_run.allocation_group, tmp_run.start, tmp_run.len); start 219 fs/befs/debug.c befs_debug(sb, "[%u, %hu, %hu]", n.allocation_group, n.start, n.len); start 78 fs/befs/endian.h run.start = le16_to_cpu((__force __le16)n.start); start 82 fs/befs/endian.h run.start = be16_to_cpu((__force __be16)n.start); start 95 fs/befs/endian.h run.start = cpu_to_le16(n.start); start 99 fs/befs/endian.h run.start = cpu_to_be16(n.start); start 35 fs/befs/io.c iaddr.start, iaddr.len); start 320 fs/befs/linuxvfs.c befs_ino->i_inode_num.start, befs_ino->i_inode_num.len); start 672 fs/befs/linuxvfs.c (unsigned long)befs_ino->i_parent.start); start 49 fs/bfs/file.c static int bfs_move_blocks(struct super_block *sb, unsigned long start, start 54 fs/bfs/file.c dprintf("%08lx-%08lx->%08lx\n", start, end, where); start 55 fs/bfs/file.c for (i = start; i <= end; i++) start 45 fs/binfmt_aout.c static int set_brk(unsigned long start, unsigned long end) start 47 fs/binfmt_aout.c start = PAGE_ALIGN(start); start 49 fs/binfmt_aout.c if (end > start) start 50 fs/binfmt_aout.c return vm_brk(start, end - start); start 102 fs/binfmt_elf.c static int set_brk(unsigned long start, unsigned long end, int prot) start 104 fs/binfmt_elf.c start = ELF_PAGEALIGN(start); start 106 fs/binfmt_elf.c if (end > start) { start 112 fs/binfmt_elf.c int error = vm_brk_flags(start, end - start, start 661 fs/block_dev.c int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) start 667 fs/block_dev.c error = file_write_and_wait_range(filp, start, end); start 2056 fs/block_dev.c static long blkdev_fallocate(struct file *file, int mode, loff_t start, 
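The 9p, affs, afs and block_dev entries above all implement the ->fsync(file, start, end, datasync) hook the same way: write back and wait on the byte range with file_write_and_wait_range(), then flush filesystem-specific state. A minimal sketch of that pattern, assuming a hypothetical examplefs_fsync() and leaving the filesystem-specific commit as a placeholder; this is an illustration of the recurring shape, not any one driver's code:

#include <linux/fs.h>

static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file_inode(file);
	int err;

	/* Write back and wait on dirty pagecache in [start, end] first. */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Then commit filesystem-specific metadata under the inode lock
	 * (this sketch ignores datasync and has nothing extra to commit). */
	inode_lock(inode);
	/* ... examplefs-specific commit of the inode would go here ... */
	inode_unlock(inode);

	return 0;
}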
start 2061 fs/block_dev.c loff_t end = start + len - 1; start 2071 fs/block_dev.c if (start >= isize) start 2075 fs/block_dev.c len = isize - start; start 2076 fs/block_dev.c end = start + len - 1; start 2084 fs/block_dev.c if ((start | len) & (bdev_logical_block_size(bdev) - 1)) start 2089 fs/block_dev.c truncate_inode_pages_range(mapping, start, end); start 2094 fs/block_dev.c error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, start 2098 fs/block_dev.c error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, start 2102 fs/block_dev.c error = blkdev_issue_discard(bdev, start >> 9, len >> 9, start 2117 fs/block_dev.c start >> PAGE_SHIFT, start 430 fs/btrfs/backref.c ret = ulist_add(parents, eb->start, 0, GFP_NOFS); start 474 fs/btrfs/backref.c ret = ulist_add_merge_ptr(parents, eb->start, start 196 fs/btrfs/block-group.c u64 end, start; start 205 fs/btrfs/block-group.c start = cache->key.objectid; start 207 fs/btrfs/block-group.c if (bytenr < start) { start 208 fs/btrfs/block-group.c if (!contains && (!ret || start < ret->key.objectid)) start 211 fs/btrfs/block-group.c } else if (bytenr > start) { start 323 fs/btrfs/block-group.c const u64 start) start 327 fs/btrfs/block-group.c bg = btrfs_lookup_block_group(fs_info, start); start 430 fs/btrfs/block-group.c u64 start = block_group->key.objectid; start 437 fs/btrfs/block-group.c btrfs_remove_free_space(block_group, start, chunk); start 438 fs/btrfs/block-group.c start += step; start 454 fs/btrfs/block-group.c u64 start, u64 end) start 460 fs/btrfs/block-group.c while (start < end) { start 461 fs/btrfs/block-group.c ret = find_first_extent_bit(info->pinned_extents, start, start 468 fs/btrfs/block-group.c if (extent_start <= start) { start 469 fs/btrfs/block-group.c start = extent_end + 1; start 470 fs/btrfs/block-group.c } else if (extent_start > start && extent_start < end) { start 471 fs/btrfs/block-group.c size = extent_start - start; start 473 fs/btrfs/block-group.c ret = btrfs_add_free_space(block_group, start, start 476 fs/btrfs/block-group.c start = extent_end + 1; start 482 fs/btrfs/block-group.c if (start < end) { start 483 fs/btrfs/block-group.c size = end - start; start 485 fs/btrfs/block-group.c ret = btrfs_add_free_space(block_group, start, size); start 1144 fs/btrfs/block-group.c ASSERT(em && em->start == chunk_offset); start 1263 fs/btrfs/block-group.c u64 start, end; start 1325 fs/btrfs/block-group.c start = block_group->key.objectid; start 1326 fs/btrfs/block-group.c end = start + block_group->key.offset - 1; start 1339 fs/btrfs/block-group.c ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, start 1346 fs/btrfs/block-group.c ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, start 1473 fs/btrfs/block-group.c } else if (em->start != found_key.objectid || start 1478 fs/btrfs/block-group.c em->start, em->len); start 1549 fs/btrfs/block-group.c u64 start, len; start 1558 fs/btrfs/block-group.c start = logical[nr]; start 1559 fs/btrfs/block-group.c if (start < cache->key.objectid) { start 1560 fs/btrfs/block-group.c start = cache->key.objectid; start 1561 fs/btrfs/block-group.c len = (logical[nr] + stripe_len) - start; start 1565 fs/btrfs/block-group.c cache->key.offset - start); start 1569 fs/btrfs/block-group.c ret = btrfs_add_excluded_extent(fs_info, start, len); start 1598 fs/btrfs/block-group.c struct btrfs_fs_info *fs_info, u64 start, u64 size) start 1613 fs/btrfs/block-group.c cache->key.objectid = start; start 1618 fs/btrfs/block-group.c cache->full_stripe_len = 
btrfs_full_stripe_len(fs_info, start); start 1647 fs/btrfs/block-group.c u64 start = 0; start 1657 fs/btrfs/block-group.c em = lookup_extent_mapping(map_tree, start, 1); start 1662 fs/btrfs/block-group.c bg = btrfs_lookup_block_group(fs_info, em->start); start 1666 fs/btrfs/block-group.c em->start, em->len); start 1671 fs/btrfs/block-group.c if (bg->key.objectid != em->start || start 1677 fs/btrfs/block-group.c em->start, em->len, start 1686 fs/btrfs/block-group.c start = em->start + em->len; start 182 fs/btrfs/block-group.h const u64 start); start 196 fs/btrfs/block-group.h u64 start, u64 end); start 209 fs/btrfs/check-integrity.c u64 start; /* virtual bytenr */ start 742 fs/btrfs/check-integrity.c tmp_next_block_ctx.start); start 976 fs/btrfs/check-integrity.c sf->block_ctx->start, sf->nr, start 1003 fs/btrfs/check-integrity.c sf->block_ctx->start, start 1099 fs/btrfs/check-integrity.c sf->block_ctx->start, start 1123 fs/btrfs/check-integrity.c sf->block_ctx->start, start 1207 fs/btrfs/check-integrity.c size_t start_offset = offset_in_page(block_ctx->start); start 1390 fs/btrfs/check-integrity.c block_ctx->start, block_ctx->dev->name); start 1410 fs/btrfs/check-integrity.c block_ctx->start, block_ctx->dev->name); start 1529 fs/btrfs/check-integrity.c block_ctx_out->start = 0; start 1548 fs/btrfs/check-integrity.c block_ctx_out->start = bytenr; start 1645 fs/btrfs/check-integrity.c block_ctx->start, block_ctx->dev->name); start 1893 fs/btrfs/check-integrity.c block_ctx.start = bytenr; start 2021 fs/btrfs/check-integrity.c block_ctx.start = bytenr; start 2656 fs/btrfs/check-integrity.c block->logical_bytenr = block_ctx->start; start 214 fs/btrfs/compression.c unsigned long index = cb->start >> PAGE_SHIFT; start 215 fs/btrfs/compression.c unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; start 275 fs/btrfs/compression.c cb->start, cb->start + cb->len - 1, start 309 fs/btrfs/compression.c blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, start 327 fs/btrfs/compression.c WARN_ON(!PAGE_ALIGNED(start)); start 334 fs/btrfs/compression.c cb->start = start; start 377 fs/btrfs/compression.c ret = btrfs_csum_one_bio(inode, bio, start, 1); start 408 fs/btrfs/compression.c ret = btrfs_csum_one_bio(inode, bio, start, 1); start 492 fs/btrfs/compression.c if (!em || last_offset < em->start || start 589 fs/btrfs/compression.c cb->start = em->orig_start; start 591 fs/btrfs/compression.c em_start = em->start; start 1033 fs/btrfs/compression.c u64 start, struct page **pages, start 1046 fs/btrfs/compression.c start, pages, start 1455 fs/btrfs/compression.c static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, start 1472 fs/btrfs/compression.c if (end - start > BTRFS_MAX_UNCOMPRESSED) start 1473 fs/btrfs/compression.c end = start + BTRFS_MAX_UNCOMPRESSED; start 1475 fs/btrfs/compression.c index = start >> PAGE_SHIFT; start 1487 fs/btrfs/compression.c i = start % PAGE_SIZE; start 1490 fs/btrfs/compression.c if (start > end - SAMPLING_READ_SIZE) start 1495 fs/btrfs/compression.c start += SAMPLING_INTERVAL; start 1522 fs/btrfs/compression.c int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) start 1532 fs/btrfs/compression.c heuristic_collect_sample(inode, start, end, ws); start 39 fs/btrfs/compression.h u64 start; start 81 fs/btrfs/compression.h u64 start, struct page **pages, start 91 fs/btrfs/compression.h blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, start 144 fs/btrfs/compression.h u64 start, 
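Several of the fs/btrfs/compression.c entries above turn a byte range given as start/len into page indexes with start >> PAGE_SHIFT and (start + len - 1) >> PAGE_SHIFT, plus start % PAGE_SIZE for the offset inside the first page. A standalone, userspace-style sketch of that arithmetic; the 4 KiB page size and the sample offsets are assumptions for illustration only:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long long start = 6144;   /* byte offset of the range      */
	unsigned long long len = 10000;    /* length of the range in bytes  */

	/* First and last page index covered by [start, start + len - 1]. */
	unsigned long long index = start >> PAGE_SHIFT;
	unsigned long long end_index = (start + len - 1) >> PAGE_SHIFT;

	/* Offset of the range inside its first page. */
	unsigned long long in_page = start % PAGE_SIZE;

	printf("pages %llu..%llu, offset %llu within the first page\n",
	       index, end_index, in_page);
	return 0;
}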
start 177 fs/btrfs/compression.h int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end); start 241 fs/btrfs/ctree.c &disk_key, level, buf->start, 0); start 246 fs/btrfs/ctree.c btrfs_set_header_bytenr(cow, cow->start); start 478 fs/btrfs/ctree.c tm->logical = eb->start; start 539 fs/btrfs/ctree.c tm->logical = eb->start; start 648 fs/btrfs/ctree.c tm->logical = new_root->start; start 649 fs/btrfs/ctree.c tm->old_root.logical = old_root->start; start 681 fs/btrfs/ctree.c __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, start 694 fs/btrfs/ctree.c if (cur->logical < start) { start 696 fs/btrfs/ctree.c } else if (cur->logical > start) { start 728 fs/btrfs/ctree.c tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, start 731 fs/btrfs/ctree.c return __tree_mod_log_search(fs_info, start, min_seq, 1); start 740 fs/btrfs/ctree.c tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) start 742 fs/btrfs/ctree.c return __tree_mod_log_search(fs_info, start, min_seq, 0); start 915 fs/btrfs/ctree.c ret = btrfs_lookup_extent_info(trans, fs_info, buf->start, start 970 fs/btrfs/ctree.c buf->start, start 1082 fs/btrfs/ctree.c parent_start = parent->start; start 1092 fs/btrfs/ctree.c btrfs_set_header_bytenr(cow, cow->start); start 1122 fs/btrfs/ctree.c parent_start = buf->start; start 1138 fs/btrfs/ctree.c cow->start); start 1169 fs/btrfs/ctree.c u64 root_logical = eb_root->start; start 1310 fs/btrfs/ctree.c tm = tree_mod_log_search(fs_info, eb->start, time_seq); start 1319 fs/btrfs/ctree.c eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); start 1325 fs/btrfs/ctree.c btrfs_set_header_bytenr(eb_rewin, eb->start); start 1382 fs/btrfs/ctree.c logical = eb_root->start; start 1417 fs/btrfs/ctree.c btrfs_set_header_bytenr(eb, eb->start); start 1512 fs/btrfs/ctree.c search_start = buf->start & ~((u64)SZ_1G - 1); start 1687 fs/btrfs/ctree.c search_start = cur->start; start 1688 fs/btrfs/ctree.c last_block = cur->start; start 1726 fs/btrfs/ctree.c __func__, low, high, eb->start, start 3407 fs/btrfs/ctree.c root->node->start, 0); start 3415 fs/btrfs/ctree.c btrfs_set_node_blockptr(c, 0, lower->start); start 3537 fs/btrfs/ctree.c c->start, 0); start 3560 fs/btrfs/ctree.c insert_ptr(trans, path, &disk_key, split->start, start 3581 fs/btrfs/ctree.c static int leaf_space_used(struct extent_buffer *l, int start, int nr) start 3588 fs/btrfs/ctree.c int end = min(nritems, start + nr) - 1; start 3593 fs/btrfs/ctree.c start_item = btrfs_item_nr(start); start 4121 fs/btrfs/ctree.c insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); start 4318 fs/btrfs/ctree.c l->start, 0); start 4328 fs/btrfs/ctree.c right->start, path->slots[1] + 1, 1); start 4337 fs/btrfs/ctree.c right->start, path->slots[1], 1); start 2397 fs/btrfs/ctree.h u64 start, u64 num_bytes); start 2404 fs/btrfs/ctree.h int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); start 2445 fs/btrfs/ctree.h u64 start, u64 len, int delalloc); start 2447 fs/btrfs/ctree.h u64 start, u64 len); start 2493 fs/btrfs/ctree.h u64 start, u64 end); start 2806 fs/btrfs/ctree.h int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, start 2816 fs/btrfs/ctree.h u64 start, u64 len); start 2842 fs/btrfs/ctree.h int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, start 2859 fs/btrfs/ctree.h void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end); start 2877 fs/btrfs/ctree.h u64 start, u64 end, int 
create); start 2891 fs/btrfs/ctree.h u64 start, u64 num_bytes, u64 min_size, start 2895 fs/btrfs/ctree.h u64 start, u64 num_bytes, u64 min_size, start 2898 fs/btrfs/ctree.h u64 start, u64 end, int *page_started, unsigned long *nr_written, start 2900 fs/btrfs/ctree.h int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end); start 2901 fs/btrfs/ctree.h void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, start 2926 fs/btrfs/ctree.h int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); start 2927 fs/btrfs/ctree.h void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, start 2932 fs/btrfs/ctree.h struct btrfs_path *path, u64 start, u64 end, start 2938 fs/btrfs/ctree.h struct btrfs_root *root, struct inode *inode, u64 start, start 2941 fs/btrfs/ctree.h const u64 start, const u64 end, start 2945 fs/btrfs/ctree.h struct btrfs_inode *inode, u64 start, u64 end); start 2950 fs/btrfs/ctree.h int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); start 3358 fs/btrfs/ctree.h int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, start 3394 fs/btrfs/ctree.h struct btrfs_key *start, struct btrfs_key *end); start 139 fs/btrfs/delalloc-space.c struct extent_changeset **reserved, u64 start, u64 len) start 145 fs/btrfs/delalloc-space.c len = round_up(start + len, fs_info->sectorsize) - start 146 fs/btrfs/delalloc-space.c round_down(start, fs_info->sectorsize); start 147 fs/btrfs/delalloc-space.c start = round_down(start, fs_info->sectorsize); start 154 fs/btrfs/delalloc-space.c ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); start 156 fs/btrfs/delalloc-space.c btrfs_free_reserved_data_space_noquota(inode, start, len); start 170 fs/btrfs/delalloc-space.c void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, start 177 fs/btrfs/delalloc-space.c len = round_up(start + len, fs_info->sectorsize) - start 178 fs/btrfs/delalloc-space.c round_down(start, fs_info->sectorsize); start 179 fs/btrfs/delalloc-space.c start = round_down(start, fs_info->sectorsize); start 195 fs/btrfs/delalloc-space.c struct extent_changeset *reserved, u64 start, u64 len) start 200 fs/btrfs/delalloc-space.c len = round_up(start + len, root->fs_info->sectorsize) - start 201 fs/btrfs/delalloc-space.c round_down(start, root->fs_info->sectorsize); start 202 fs/btrfs/delalloc-space.c start = round_down(start, root->fs_info->sectorsize); start 204 fs/btrfs/delalloc-space.c btrfs_free_reserved_data_space_noquota(inode, start, len); start 205 fs/btrfs/delalloc-space.c btrfs_qgroup_free_data(inode, reserved, start, len); start 470 fs/btrfs/delalloc-space.c struct extent_changeset **reserved, u64 start, u64 len) start 474 fs/btrfs/delalloc-space.c ret = btrfs_check_data_free_space(inode, reserved, start, len); start 479 fs/btrfs/delalloc-space.c btrfs_free_reserved_data_space(inode, *reserved, start, len); start 497 fs/btrfs/delalloc-space.c u64 start, u64 len, bool qgroup_free) start 500 fs/btrfs/delalloc-space.c btrfs_free_reserved_data_space(inode, reserved, start, len); start 10 fs/btrfs/delalloc-space.h struct extent_changeset **reserved, u64 start, u64 len); start 12 fs/btrfs/delalloc-space.h struct extent_changeset *reserved, u64 start, u64 len); start 15 fs/btrfs/delalloc-space.h u64 start, u64 len, bool qgroup_free); start 16 fs/btrfs/delalloc-space.h void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, start 21 fs/btrfs/delalloc-space.h struct extent_changeset **reserved, 
u64 start, u64 len); start 521 fs/btrfs/dev-replace.c switch (args->start.cont_reading_from_srcdev_mode) { start 529 fs/btrfs/dev-replace.c if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') || start 530 fs/btrfs/dev-replace.c args->start.tgtdev_name[0] == '\0') start 533 fs/btrfs/dev-replace.c ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name, start 534 fs/btrfs/dev-replace.c args->start.srcdevid, start 535 fs/btrfs/dev-replace.c args->start.srcdev_name, start 536 fs/btrfs/dev-replace.c args->start.cont_reading_from_srcdev_mode); start 730 fs/btrfs/dev-replace.c u64 start = 0; start 735 fs/btrfs/dev-replace.c em = lookup_extent_mapping(em_tree, start, (u64)-1); start 742 fs/btrfs/dev-replace.c start = em->start + em->len; start 744 fs/btrfs/dev-replace.c } while (start); start 427 fs/btrfs/dir-item.c unsigned long start; start 429 fs/btrfs/dir-item.c start = btrfs_item_ptr_offset(leaf, path->slots[0]); start 431 fs/btrfs/dir-item.c item_len - (ptr + sub_item_len - start)); start 205 fs/btrfs/disk-io.c struct page *page, size_t pg_offset, u64 start, u64 len, start 214 fs/btrfs/disk-io.c em = lookup_extent_mapping(em_tree, start, len); start 227 fs/btrfs/disk-io.c em->start = 0; start 237 fs/btrfs/disk-io.c em = lookup_extent_mapping(em_tree, start, len); start 320 fs/btrfs/disk-io.c lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, start 329 fs/btrfs/disk-io.c eb->start, start 344 fs/btrfs/disk-io.c unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, start 405 fs/btrfs/disk-io.c eb->start, level, found_level); start 425 fs/btrfs/disk-io.c eb->start); start 441 fs/btrfs/disk-io.c eb->start, parent_transid, first_key->objectid, start 485 fs/btrfs/disk-io.c eb->start, eb->len); start 515 fs/btrfs/disk-io.c u64 start = page_offset(page); start 531 fs/btrfs/disk-io.c if (WARN_ON(found_start != start)) start 550 fs/btrfs/disk-io.c eb->start); start 591 fs/btrfs/disk-io.c u64 start, u64 end, int mirror) start 624 fs/btrfs/disk-io.c if (found_start != eb->start) { start 626 fs/btrfs/disk-io.c eb->start, found_start); start 632 fs/btrfs/disk-io.c eb->start); start 639 fs/btrfs/disk-io.c (int)btrfs_header_level(eb), eb->start); start 660 fs/btrfs/disk-io.c fs_info->sb->s_id, eb->start, start 684 fs/btrfs/disk-io.c eb->start); start 1264 fs/btrfs/disk-io.c btrfs_set_root_bytenr(&root->root_item, leaf->start); start 1869 fs/btrfs/disk-io.c btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); start 1876 fs/btrfs/disk-io.c btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); start 1882 fs/btrfs/disk-io.c btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); start 1894 fs/btrfs/disk-io.c info->fs_root->node->start); start 1901 fs/btrfs/disk-io.c btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); start 1907 fs/btrfs/disk-io.c btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); start 4145 fs/btrfs/disk-io.c buf->start, transid, fs_info->generation); start 4388 fs/btrfs/disk-io.c u64 start = 0; start 4392 fs/btrfs/disk-io.c ret = find_first_extent_bit(dirty_pages, start, &start, &end, start 4397 fs/btrfs/disk-io.c clear_extent_bits(dirty_pages, start, end, mark); start 4398 fs/btrfs/disk-io.c while (start <= end) { start 4399 fs/btrfs/disk-io.c eb = find_extent_buffer(fs_info, start); start 4400 fs/btrfs/disk-io.c start += fs_info->nodesize; start 4419 fs/btrfs/disk-io.c u64 start; start 4436 fs/btrfs/disk-io.c ret = find_first_extent_bit(unpin, 0, &start, 
&end, start 4443 fs/btrfs/disk-io.c clear_extent_dirty(unpin, start, end, &cached_state); start 4445 fs/btrfs/disk-io.c btrfs_error_unpin_extent_range(fs_info, start, end); start 33 fs/btrfs/disk-io.h u64 start = SZ_16K; start 35 fs/btrfs/disk-io.h return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror); start 137 fs/btrfs/disk-io.h struct page *page, size_t pg_offset, u64 start, u64 len, start 63 fs/btrfs/extent-tree.c u64 start, u64 num_bytes) start 65 fs/btrfs/extent-tree.c u64 end = start + num_bytes - 1; start 67 fs/btrfs/extent-tree.c start, end, EXTENT_UPTODATE); start 69 fs/btrfs/extent-tree.c start, end, EXTENT_UPTODATE); start 76 fs/btrfs/extent-tree.c u64 start, end; start 78 fs/btrfs/extent-tree.c start = cache->key.objectid; start 79 fs/btrfs/extent-tree.c end = start + cache->key.offset - 1; start 82 fs/btrfs/extent-tree.c start, end, EXTENT_UPTODATE); start 84 fs/btrfs/extent-tree.c start, end, EXTENT_UPTODATE); start 123 fs/btrfs/extent-tree.c int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len) start 133 fs/btrfs/extent-tree.c key.objectid = start; start 435 fs/btrfs/extent-tree.c eb->start, type); start 1234 fs/btrfs/extent-tree.c static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, start 1239 fs/btrfs/extent-tree.c u64 aligned_start = ALIGN(start, 1 << 9); start 1241 fs/btrfs/extent-tree.c if (WARN_ON(start != aligned_start)) { start 1242 fs/btrfs/extent-tree.c len -= aligned_start - start; start 1244 fs/btrfs/extent-tree.c start = aligned_start; start 1252 fs/btrfs/extent-tree.c end = start + len; start 1259 fs/btrfs/extent-tree.c u64 size = sb_start - start; start 1261 fs/btrfs/extent-tree.c if (!in_range(sb_start, start, bytes_left) && start 1262 fs/btrfs/extent-tree.c !in_range(sb_end, start, bytes_left) && start 1263 fs/btrfs/extent-tree.c !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE)) start 1270 fs/btrfs/extent-tree.c if (sb_start <= start) { start 1271 fs/btrfs/extent-tree.c start += sb_end - start; start 1272 fs/btrfs/extent-tree.c if (start > end) { start 1276 fs/btrfs/extent-tree.c bytes_left = end - start; start 1281 fs/btrfs/extent-tree.c ret = blkdev_issue_discard(bdev, start >> 9, size >> 9, start 1289 fs/btrfs/extent-tree.c start = sb_end; start 1290 fs/btrfs/extent-tree.c if (start > end) { start 1294 fs/btrfs/extent-tree.c bytes_left = end - start; start 1298 fs/btrfs/extent-tree.c ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9, start 1987 fs/btrfs/extent-tree.c ktime_t start = ktime_get(); start 2058 fs/btrfs/extent-tree.c u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); start 2448 fs/btrfs/extent-tree.c parent = buf->start; start 2640 fs/btrfs/extent-tree.c u64 start, u64 num_bytes) start 2646 fs/btrfs/extent-tree.c block_group = btrfs_lookup_block_group(fs_info, start); start 2656 fs/btrfs/extent-tree.c ret = btrfs_remove_free_space(block_group, start, num_bytes); start 2660 fs/btrfs/extent-tree.c if (start >= caching_ctl->progress) { start 2661 fs/btrfs/extent-tree.c ret = btrfs_add_excluded_extent(fs_info, start, start 2663 fs/btrfs/extent-tree.c } else if (start + num_bytes <= caching_ctl->progress) { start 2665 fs/btrfs/extent-tree.c start, num_bytes); start 2667 fs/btrfs/extent-tree.c num_bytes = caching_ctl->progress - start; start 2669 fs/btrfs/extent-tree.c start, num_bytes); start 2673 fs/btrfs/extent-tree.c num_bytes = (start + num_bytes) - start 2675 fs/btrfs/extent-tree.c start = caching_ctl->progress; start 2676 fs/btrfs/extent-tree.c ret = 
btrfs_add_excluded_extent(fs_info, start, start 2785 fs/btrfs/extent-tree.c u64 start, u64 end, start 2797 fs/btrfs/extent-tree.c while (start <= end) { start 2800 fs/btrfs/extent-tree.c start >= cache->key.objectid + cache->key.offset) { start 2804 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, start); start 2813 fs/btrfs/extent-tree.c len = cache->key.objectid + cache->key.offset - start; start 2814 fs/btrfs/extent-tree.c len = min(len, end + 1 - start); start 2816 fs/btrfs/extent-tree.c if (start < cache->last_byte_to_unpin) { start 2817 fs/btrfs/extent-tree.c len = min(len, cache->last_byte_to_unpin - start); start 2819 fs/btrfs/extent-tree.c btrfs_add_free_space(cache, start, len); start 2822 fs/btrfs/extent-tree.c start += len; start 2886 fs/btrfs/extent-tree.c u64 start; start 2899 fs/btrfs/extent-tree.c ret = find_first_extent_bit(unpin, 0, &start, &end, start 2907 fs/btrfs/extent-tree.c ret = btrfs_discard_extent(fs_info, start, start 2908 fs/btrfs/extent-tree.c end + 1 - start, NULL); start 2910 fs/btrfs/extent-tree.c clear_extent_dirty(unpin, start, end, &cached_state); start 2911 fs/btrfs/extent-tree.c unpin_extent_range(fs_info, start, end, true); start 3251 fs/btrfs/extent-tree.c buf->start, buf->len, parent); start 3269 fs/btrfs/extent-tree.c ret = check_ref_cleanup(trans, buf->start); start 3275 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, buf->start); start 3278 fs/btrfs/extent-tree.c pin_down_extent(cache, buf->start, buf->len, 1); start 3285 fs/btrfs/extent-tree.c btrfs_add_free_space(cache, buf->start, buf->len); start 3288 fs/btrfs/extent-tree.c trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); start 4150 fs/btrfs/extent-tree.c u64 start, u64 len, start 4156 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, start); start 4159 fs/btrfs/extent-tree.c start); start 4164 fs/btrfs/extent-tree.c pin_down_extent(cache, start, len, 1); start 4167 fs/btrfs/extent-tree.c ret = btrfs_discard_extent(fs_info, start, len, NULL); start 4168 fs/btrfs/extent-tree.c btrfs_add_free_space(cache, start, len); start 4170 fs/btrfs/extent-tree.c trace_btrfs_reserved_extent_free(fs_info, start, len); start 4178 fs/btrfs/extent-tree.c u64 start, u64 len, int delalloc) start 4180 fs/btrfs/extent-tree.c return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc); start 4184 fs/btrfs/extent-tree.c u64 start, u64 len) start 4186 fs/btrfs/extent-tree.c return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0); start 4439 fs/btrfs/extent-tree.c buf->start, btrfs_header_owner(buf), current->pid); start 4454 fs/btrfs/extent-tree.c btrfs_set_header_bytenr(buf, buf->start); start 4467 fs/btrfs/extent-tree.c set_extent_dirty(&root->dirty_log_pages, buf->start, start 4468 fs/btrfs/extent-tree.c buf->start + buf->len - 1, GFP_NOFS); start 4470 fs/btrfs/extent-tree.c set_extent_new(&root->dirty_log_pages, buf->start, start 4471 fs/btrfs/extent-tree.c buf->start + buf->len - 1); start 4474 fs/btrfs/extent-tree.c set_extent_dirty(&trans->transaction->dirty_pages, buf->start, start 4475 fs/btrfs/extent-tree.c buf->start + buf->len - 1, GFP_NOFS); start 4707 fs/btrfs/extent-tree.c eb->start, level, 1, start 4734 fs/btrfs/extent-tree.c ret = btrfs_set_disk_extent_flags(trans, eb->start, start 4920 fs/btrfs/extent-tree.c parent = path->nodes[level]->start; start 5042 fs/btrfs/extent-tree.c eb->start, level, 1, start 5090 fs/btrfs/extent-tree.c parent = eb->start; start 5095 fs/btrfs/extent-tree.c parent = path->nodes[level + 
1]->start; start 5283 fs/btrfs/extent-tree.c path->nodes[level]->start, start 5532 fs/btrfs/extent-tree.c u64 start, u64 end) start 5534 fs/btrfs/extent-tree.c return unpin_extent_range(fs_info, start, end, false); start 5559 fs/btrfs/extent-tree.c u64 start = SZ_1M, len = 0, end = 0; start 5586 fs/btrfs/extent-tree.c find_first_clear_extent_bit(&device->alloc_state, start, start 5587 fs/btrfs/extent-tree.c &start, &end, start 5591 fs/btrfs/extent-tree.c start = max_t(u64, start, SZ_1M); start 5600 fs/btrfs/extent-tree.c len = end - start + 1; start 5609 fs/btrfs/extent-tree.c ret = btrfs_issue_discard(device->bdev, start, len, start 5612 fs/btrfs/extent-tree.c set_extent_bits(&device->alloc_state, start, start 5613 fs/btrfs/extent-tree.c start + bytes - 1, start 5620 fs/btrfs/extent-tree.c start += len; start 5650 fs/btrfs/extent-tree.c u64 start; start 5664 fs/btrfs/extent-tree.c check_add_overflow(range->start, range->len, &range_end)) start 5667 fs/btrfs/extent-tree.c cache = btrfs_lookup_first_block_group(fs_info, range->start); start 5674 fs/btrfs/extent-tree.c start = max(range->start, cache->key.objectid); start 5677 fs/btrfs/extent-tree.c if (end - start >= range->minlen) { start 5694 fs/btrfs/extent-tree.c start, start 71 fs/btrfs/extent_io.c state->start, state->end, state->state, start 81 fs/btrfs/extent_io.c eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); start 87 fs/btrfs/extent_io.c #define btrfs_debug_check_extent_io_range(tree, start, end) \ start 88 fs/btrfs/extent_io.c __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end)) start 90 fs/btrfs/extent_io.c struct extent_io_tree *tree, u64 start, u64 end) start 102 fs/btrfs/extent_io.c caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); start 113 fs/btrfs/extent_io.c u64 start; start 142 fs/btrfs/extent_io.c changeset->bytes_changed += state->end - state->start + 1; start 143 fs/btrfs/extent_io.c ret = ulist_add(&changeset->range_changed, state->start, state->end, start 348 fs/btrfs/extent_io.c if (offset < entry->start) start 398 fs/btrfs/extent_io.c if (offset < entry->start) start 423 fs/btrfs/extent_io.c while (prev && offset < prev_entry->start) { start 474 fs/btrfs/extent_io.c if (other->end == state->start - 1 && start 480 fs/btrfs/extent_io.c state->start = other->start; start 489 fs/btrfs/extent_io.c if (other->start == state->end + 1 && start 518 fs/btrfs/extent_io.c struct extent_state *state, u64 start, u64 end, start 525 fs/btrfs/extent_io.c if (end < start) { start 527 fs/btrfs/extent_io.c "insert state: end < start %llu %llu", end, start); start 530 fs/btrfs/extent_io.c state->start = start; start 541 fs/btrfs/extent_io.c found->start, found->end, start, end); start 570 fs/btrfs/extent_io.c prealloc->start = orig->start; start 573 fs/btrfs/extent_io.c orig->start = split; start 610 fs/btrfs/extent_io.c u64 range = state->end - state->start + 1; start 668 fs/btrfs/extent_io.c int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 681 fs/btrfs/extent_io.c btrfs_debug_check_extent_io_range(tree, start, end); start 682 fs/btrfs/extent_io.c trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); start 714 fs/btrfs/extent_io.c cached->start <= start && cached->end > start) { start 727 fs/btrfs/extent_io.c node = tree_search(tree, start); start 732 fs/btrfs/extent_io.c if (state->start > end) start 734 fs/btrfs/extent_io.c WARN_ON(state->end < start); start 759 fs/btrfs/extent_io.c if (state->start < start) { start 762 fs/btrfs/extent_io.c err = 
split_state(tree, state, prealloc, start); start 782 fs/btrfs/extent_io.c if (state->start <= end && state->end > end) { start 802 fs/btrfs/extent_io.c start = last_end + 1; start 803 fs/btrfs/extent_io.c if (start <= end && state && !need_resched()) start 807 fs/btrfs/extent_io.c if (start > end) start 841 fs/btrfs/extent_io.c static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 847 fs/btrfs/extent_io.c btrfs_debug_check_extent_io_range(tree, start, end); start 856 fs/btrfs/extent_io.c node = tree_search(tree, start); start 863 fs/btrfs/extent_io.c if (state->start > end) start 867 fs/btrfs/extent_io.c start = state->start; start 873 fs/btrfs/extent_io.c start = state->end + 1; start 875 fs/btrfs/extent_io.c if (start > end) start 898 fs/btrfs/extent_io.c u64 range = state->end - state->start + 1; start 937 fs/btrfs/extent_io.c __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 951 fs/btrfs/extent_io.c btrfs_debug_check_extent_io_range(tree, start, end); start 952 fs/btrfs/extent_io.c trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); start 969 fs/btrfs/extent_io.c if (state->start <= start && state->end > start && start 979 fs/btrfs/extent_io.c node = tree_search_for_insert(tree, start, &p, &parent); start 983 fs/btrfs/extent_io.c err = insert_state(tree, prealloc, start, end, start 994 fs/btrfs/extent_io.c last_start = state->start; start 1003 fs/btrfs/extent_io.c if (state->start == start && state->end <= end) { start 1005 fs/btrfs/extent_io.c *failed_start = state->start; start 1015 fs/btrfs/extent_io.c start = last_end + 1; start 1017 fs/btrfs/extent_io.c if (start < end && state && state->start == start && start 1039 fs/btrfs/extent_io.c if (state->start < start) { start 1041 fs/btrfs/extent_io.c *failed_start = start; start 1048 fs/btrfs/extent_io.c err = split_state(tree, state, prealloc, start); start 1061 fs/btrfs/extent_io.c start = last_end + 1; start 1063 fs/btrfs/extent_io.c if (start < end && state && state->start == start && start 1076 fs/btrfs/extent_io.c if (state->start > start) { start 1090 fs/btrfs/extent_io.c err = insert_state(tree, prealloc, start, this_end, start 1097 fs/btrfs/extent_io.c start = this_end + 1; start 1106 fs/btrfs/extent_io.c if (state->start <= end && state->end > end) { start 1108 fs/btrfs/extent_io.c *failed_start = start; start 1127 fs/btrfs/extent_io.c if (start > end) start 1143 fs/btrfs/extent_io.c int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 1147 fs/btrfs/extent_io.c return __set_extent_bit(tree, start, end, bits, 0, failed_start, start 1170 fs/btrfs/extent_io.c int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 1184 fs/btrfs/extent_io.c btrfs_debug_check_extent_io_range(tree, start, end); start 1185 fs/btrfs/extent_io.c trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, start 1205 fs/btrfs/extent_io.c if (state->start <= start && state->end > start && start 1216 fs/btrfs/extent_io.c node = tree_search_for_insert(tree, start, &p, &parent); start 1223 fs/btrfs/extent_io.c err = insert_state(tree, prealloc, start, end, start 1233 fs/btrfs/extent_io.c last_start = state->start; start 1242 fs/btrfs/extent_io.c if (state->start == start && state->end <= end) { start 1248 fs/btrfs/extent_io.c start = last_end + 1; start 1249 fs/btrfs/extent_io.c if (start < end && state && state->start == start && start 1271 fs/btrfs/extent_io.c if (state->start < start) { start 1277 fs/btrfs/extent_io.c err = split_state(tree, 
state, prealloc, start); start 1290 fs/btrfs/extent_io.c start = last_end + 1; start 1291 fs/btrfs/extent_io.c if (start < end && state && state->start == start && start 1304 fs/btrfs/extent_io.c if (state->start > start) { start 1321 fs/btrfs/extent_io.c err = insert_state(tree, prealloc, start, this_end, start 1327 fs/btrfs/extent_io.c start = this_end + 1; start 1336 fs/btrfs/extent_io.c if (state->start <= end && state->end > end) { start 1355 fs/btrfs/extent_io.c if (start > end) start 1371 fs/btrfs/extent_io.c int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, start 1382 fs/btrfs/extent_io.c return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS, start 1386 fs/btrfs/extent_io.c int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, start 1389 fs/btrfs/extent_io.c return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, start 1393 fs/btrfs/extent_io.c int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 1397 fs/btrfs/extent_io.c return __clear_extent_bit(tree, start, end, bits, wake, delete, start 1401 fs/btrfs/extent_io.c int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, start 1410 fs/btrfs/extent_io.c return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS, start 1418 fs/btrfs/extent_io.c int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, start 1425 fs/btrfs/extent_io.c err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, start 1430 fs/btrfs/extent_io.c start = failed_start; start 1433 fs/btrfs/extent_io.c WARN_ON(start > end); start 1438 fs/btrfs/extent_io.c int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) start 1443 fs/btrfs/extent_io.c err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, start 1446 fs/btrfs/extent_io.c if (failed_start > start) start 1447 fs/btrfs/extent_io.c clear_extent_bit(tree, start, failed_start - 1, start 1454 fs/btrfs/extent_io.c void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) start 1456 fs/btrfs/extent_io.c unsigned long index = start >> PAGE_SHIFT; start 1469 fs/btrfs/extent_io.c void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) start 1471 fs/btrfs/extent_io.c unsigned long index = start >> PAGE_SHIFT; start 1491 fs/btrfs/extent_io.c u64 start, unsigned bits) start 1500 fs/btrfs/extent_io.c node = tree_search(tree, start); start 1506 fs/btrfs/extent_io.c if (state->end >= start && (state->state & bits)) start 1524 fs/btrfs/extent_io.c int find_first_extent_bit(struct extent_io_tree *tree, u64 start, start 1534 fs/btrfs/extent_io.c if (state->end == start - 1 && extent_state_in_tree(state)) { start 1547 fs/btrfs/extent_io.c state = find_first_extent_bit_state(tree, start, bits); start 1551 fs/btrfs/extent_io.c *start_ret = state->start; start 1575 fs/btrfs/extent_io.c void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, start 1585 fs/btrfs/extent_io.c node = __etree_search(tree, start, &next, &prev, NULL, NULL); start 1612 fs/btrfs/extent_io.c if (in_range(start, state->start, state->end - state->start + 1)) { start 1619 fs/btrfs/extent_io.c start = state->end + 1; start 1630 fs/btrfs/extent_io.c *start_ret = state->start; start 1662 fs/btrfs/extent_io.c if (state->end >= start && !(state->state & bits)) { start 1665 fs/btrfs/extent_io.c *end_ret = state->start - 1; start 1684 fs/btrfs/extent_io.c u64 *start, u64 *end, u64 max_bytes, start 1689 fs/btrfs/extent_io.c u64 cur_start = *start; 
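The extent_io.c entries above follow an inclusive [start, end] convention: a state's length is end - start + 1, and a walk over consecutive states advances the cursor with start = last_end + 1. A small self-contained sketch of that bookkeeping over made-up ranges; the struct and the data are illustrative stand-ins, not the kernel's extent_state:

#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long end;	/* inclusive, like the extent states above */
};

int main(void)
{
	/* Sorted, non-overlapping sample ranges (made-up data). */
	struct range states[] = {
		{ 0, 4095 }, { 4096, 8191 }, { 16384, 20479 },
	};
	unsigned long long start = 0, total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
		/* Inclusive ends make each state's length end - start + 1. */
		total += states[i].end - states[i].start + 1;
		/* Advance the cursor past this state: start = last_end + 1. */
		start = states[i].end + 1;
	}
	printf("covered %llu bytes, cursor now at %llu\n", total, start);
	return 0;
}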
start 1707 fs/btrfs/extent_io.c if (found && (state->start != cur_start || start 1717 fs/btrfs/extent_io.c *start = state->start; start 1725 fs/btrfs/extent_io.c total_bytes += state->end - state->start + 1; start 1743 fs/btrfs/extent_io.c u64 start, u64 end) start 1745 fs/btrfs/extent_io.c unsigned long index = start >> PAGE_SHIFT; start 1787 fs/btrfs/extent_io.c struct page *locked_page, u64 *start, start 1801 fs/btrfs/extent_io.c delalloc_start = *start; start 1805 fs/btrfs/extent_io.c if (!found || delalloc_end <= *start) { start 1806 fs/btrfs/extent_io.c *start = delalloc_start; start 1817 fs/btrfs/extent_io.c if (delalloc_start < *start) start 1818 fs/btrfs/extent_io.c delalloc_start = *start; start 1861 fs/btrfs/extent_io.c *start = delalloc_start; start 1944 fs/btrfs/extent_io.c void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, start 1949 fs/btrfs/extent_io.c clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0, start 1953 fs/btrfs/extent_io.c start >> PAGE_SHIFT, end >> PAGE_SHIFT, start 1963 fs/btrfs/extent_io.c u64 *start, u64 search_end, u64 max_bytes, start 1968 fs/btrfs/extent_io.c u64 cur_start = *start; start 1991 fs/btrfs/extent_io.c if (state->start > search_end) start 1993 fs/btrfs/extent_io.c if (contig && found && state->start > last + 1) start 1997 fs/btrfs/extent_io.c max(cur_start, state->start); start 2001 fs/btrfs/extent_io.c *start = max(cur_start, state->start); start 2021 fs/btrfs/extent_io.c static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start, start 2033 fs/btrfs/extent_io.c node = tree_search(tree, start); start 2039 fs/btrfs/extent_io.c if (state->start != start) { start 2049 fs/btrfs/extent_io.c static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start, start 2061 fs/btrfs/extent_io.c node = tree_search(tree, start); start 2067 fs/btrfs/extent_io.c if (state->start != start) { start 2083 fs/btrfs/extent_io.c int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, start 2091 fs/btrfs/extent_io.c if (cached && extent_state_in_tree(cached) && cached->start <= start && start 2092 fs/btrfs/extent_io.c cached->end > start) start 2095 fs/btrfs/extent_io.c node = tree_search(tree, start); start 2096 fs/btrfs/extent_io.c while (node && start <= end) { start 2099 fs/btrfs/extent_io.c if (filled && state->start > start) { start 2104 fs/btrfs/extent_io.c if (state->start > end) start 2119 fs/btrfs/extent_io.c start = state->end + 1; start 2120 fs/btrfs/extent_io.c if (start > end) start 2139 fs/btrfs/extent_io.c u64 start = page_offset(page); start 2140 fs/btrfs/extent_io.c u64 end = start + PAGE_SIZE - 1; start 2141 fs/btrfs/extent_io.c if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) start 2152 fs/btrfs/extent_io.c set_state_failrec(failure_tree, rec->start, NULL); start 2153 fs/btrfs/extent_io.c ret = clear_extent_bits(failure_tree, rec->start, start 2154 fs/btrfs/extent_io.c rec->start + rec->len - 1, start 2159 fs/btrfs/extent_io.c ret = clear_extent_bits(io_tree, rec->start, start 2160 fs/btrfs/extent_io.c rec->start + rec->len - 1, start 2179 fs/btrfs/extent_io.c int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, start 2253 fs/btrfs/extent_io.c ino, start, start 2263 fs/btrfs/extent_io.c u64 start = eb->start; start 2273 fs/btrfs/extent_io.c ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p, start 2274 fs/btrfs/extent_io.c start - page_offset(p), mirror_num); start 2277 fs/btrfs/extent_io.c start += PAGE_SIZE; 
start 2289 fs/btrfs/extent_io.c struct extent_io_tree *io_tree, u64 start, start 2304 fs/btrfs/extent_io.c ret = get_state_failrec(failure_tree, start, &failrec); start 2314 fs/btrfs/extent_io.c failrec->start); start 2322 fs/btrfs/extent_io.c failrec->start, start 2326 fs/btrfs/extent_io.c if (state && state->start <= failrec->start && start 2327 fs/btrfs/extent_io.c state->end >= failrec->start + failrec->len - 1) { start 2331 fs/btrfs/extent_io.c repair_io_failure(fs_info, ino, start, failrec->len, start 2349 fs/btrfs/extent_io.c void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) start 2359 fs/btrfs/extent_io.c state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY); start 2361 fs/btrfs/extent_io.c if (state->start > end) start 2377 fs/btrfs/extent_io.c int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, start 2389 fs/btrfs/extent_io.c ret = get_state_failrec(failure_tree, start, &failrec); start 2395 fs/btrfs/extent_io.c failrec->start = start; start 2396 fs/btrfs/extent_io.c failrec->len = end - start + 1; start 2402 fs/btrfs/extent_io.c em = lookup_extent_mapping(em_tree, start, failrec->len); start 2409 fs/btrfs/extent_io.c if (em->start > start || em->start + em->len <= start) { start 2419 fs/btrfs/extent_io.c logical = start - em->start; start 2430 fs/btrfs/extent_io.c logical, start, failrec->len); start 2436 fs/btrfs/extent_io.c ret = set_extent_bits(failure_tree, start, end, start 2439 fs/btrfs/extent_io.c ret = set_state_failrec(failure_tree, start, failrec); start 2442 fs/btrfs/extent_io.c ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); start 2450 fs/btrfs/extent_io.c failrec->logical, failrec->start, failrec->len, start 2568 fs/btrfs/extent_io.c struct page *page, u64 start, u64 end, start 2583 fs/btrfs/extent_io.c ret = btrfs_get_io_failure_record(inode, start, end, &failrec); start 2598 fs/btrfs/extent_io.c start - page_offset(page), start 2620 fs/btrfs/extent_io.c void end_extent_writepage(struct page *page, int err, u64 start, u64 end) start 2625 fs/btrfs/extent_io.c btrfs_writepage_endio_finish_ordered(page, start, end, uptodate); start 2648 fs/btrfs/extent_io.c u64 start; start 2674 fs/btrfs/extent_io.c start = page_offset(page); start 2675 fs/btrfs/extent_io.c end = start + bvec->bv_offset + bvec->bv_len - 1; start 2677 fs/btrfs/extent_io.c end_extent_writepage(page, error, start, end); start 2685 fs/btrfs/extent_io.c endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, start 2689 fs/btrfs/extent_io.c u64 end = start + len - 1; start 2692 fs/btrfs/extent_io.c set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); start 2693 fs/btrfs/extent_io.c unlock_extent_cached_atomic(tree, start, end, &cached); start 2714 fs/btrfs/extent_io.c u64 start; start 2754 fs/btrfs/extent_io.c start = page_offset(page); start 2755 fs/btrfs/extent_io.c end = start + bvec->bv_offset + bvec->bv_len - 1; start 2761 fs/btrfs/extent_io.c page, start, end, start 2767 fs/btrfs/extent_io.c failure_tree, tree, start, start 2787 fs/btrfs/extent_io.c ret = bio_readpage_error(bio, offset, page, start, end, start 2831 fs/btrfs/extent_io.c endio_readpage_release_extent(tree, start, start 2832 fs/btrfs/extent_io.c end - start + 1, 0); start 2834 fs/btrfs/extent_io.c extent_start = start; start 2835 fs/btrfs/extent_io.c extent_len = end + 1 - start; start 2836 fs/btrfs/extent_io.c } else if (extent_start + extent_len == start) { start 2837 fs/btrfs/extent_io.c extent_len += end + 1 - start; start 2841 
fs/btrfs/extent_io.c extent_start = start; start 2842 fs/btrfs/extent_io.c extent_len = end + 1 - start; start 3023 fs/btrfs/extent_io.c u64 start, u64 len, get_extent_t *get_extent, start 3030 fs/btrfs/extent_io.c if (extent_map_in_tree(em) && start >= em->start && start 3031 fs/btrfs/extent_io.c start < extent_map_end(em)) { start 3040 fs/btrfs/extent_io.c em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0); start 3064 fs/btrfs/extent_io.c u64 start = page_offset(page); start 3065 fs/btrfs/extent_io.c const u64 end = start + PAGE_SIZE - 1; start 3066 fs/btrfs/extent_io.c u64 cur = start; start 3086 fs/btrfs/extent_io.c unlock_extent(tree, start, end); start 3129 fs/btrfs/extent_io.c extent_offset = cur - em->start; start 3190 fs/btrfs/extent_io.c *prev_em_start != em->start) start 3194 fs/btrfs/extent_io.c *prev_em_start = em->start; start 3266 fs/btrfs/extent_io.c u64 start, u64 end, start 3275 fs/btrfs/extent_io.c btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL); start 3292 fs/btrfs/extent_io.c u64 start = page_offset(page); start 3293 fs/btrfs/extent_io.c u64 end = start + PAGE_SIZE - 1; start 3296 fs/btrfs/extent_io.c btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL); start 3419 fs/btrfs/extent_io.c u64 start = page_offset(page); start 3420 fs/btrfs/extent_io.c u64 page_end = start + PAGE_SIZE - 1; start 3422 fs/btrfs/extent_io.c u64 cur = start; start 3434 fs/btrfs/extent_io.c ret = btrfs_writepage_cow_fixup(page, start, page_end); start 3454 fs/btrfs/extent_io.c if (i_size <= start) { start 3455 fs/btrfs/extent_io.c btrfs_writepage_endio_finish_ordered(page, start, page_end, 1); start 3478 fs/btrfs/extent_io.c extent_offset = cur - em->start; start 3558 fs/btrfs/extent_io.c u64 start = page_offset(page); start 3559 fs/btrfs/extent_io.c u64 page_end = start + PAGE_SIZE - 1; start 3599 fs/btrfs/extent_io.c ret = writepage_delalloc(inode, page, wbc, start, &nr_written); start 3619 fs/btrfs/extent_io.c end_extent_writepage(page, ret, start, page_end); start 3858 fs/btrfs/extent_io.c u64 offset = eb->start; start 3861 fs/btrfs/extent_io.c unsigned long start, end; start 3880 fs/btrfs/extent_io.c start = btrfs_item_nr_offset(nritems); start 3882 fs/btrfs/extent_io.c memzero_extent_buffer(eb, start, end - start); start 4266 fs/btrfs/extent_io.c int extent_write_locked_range(struct inode *inode, u64 start, u64 end, start 4273 fs/btrfs/extent_io.c unsigned long nr_pages = (end - start + PAGE_SIZE) >> start 4285 fs/btrfs/extent_io.c .range_start = start, start 4289 fs/btrfs/extent_io.c while (start <= end) { start 4290 fs/btrfs/extent_io.c page = find_get_page(mapping, start >> PAGE_SHIFT); start 4294 fs/btrfs/extent_io.c btrfs_writepage_endio_finish_ordered(page, start, start 4295 fs/btrfs/extent_io.c start + PAGE_SIZE - 1, 1); start 4299 fs/btrfs/extent_io.c start += PAGE_SIZE; start 4389 fs/btrfs/extent_io.c u64 start = page_offset(page); start 4390 fs/btrfs/extent_io.c u64 end = start + PAGE_SIZE - 1; start 4393 fs/btrfs/extent_io.c start += ALIGN(offset, blocksize); start 4394 fs/btrfs/extent_io.c if (start > end) start 4397 fs/btrfs/extent_io.c lock_extent_bits(tree, start, end, &cached_state); start 4399 fs/btrfs/extent_io.c clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC | start 4412 fs/btrfs/extent_io.c u64 start = page_offset(page); start 4413 fs/btrfs/extent_io.c u64 end = start + PAGE_SIZE - 1; start 4416 fs/btrfs/extent_io.c if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) { start 4423 
fs/btrfs/extent_io.c ret = __clear_extent_bit(tree, start, end, start 4446 fs/btrfs/extent_io.c u64 start = page_offset(page); start 4447 fs/btrfs/extent_io.c u64 end = start + PAGE_SIZE - 1; start 4455 fs/btrfs/extent_io.c while (start <= end) { start 4456 fs/btrfs/extent_io.c len = end - start + 1; start 4458 fs/btrfs/extent_io.c em = lookup_extent_mapping(map, start, len); start 4464 fs/btrfs/extent_io.c em->start != start) { start 4469 fs/btrfs/extent_io.c if (!test_range_bit(tree, em->start, start 4478 fs/btrfs/extent_io.c start = extent_map_end(em); start 4637 fs/btrfs/extent_io.c __u64 start, __u64 len) start 4640 fs/btrfs/extent_io.c u64 off = start; start 4641 fs/btrfs/extent_io.c u64 max = start + len; start 4676 fs/btrfs/extent_io.c start = round_down(start, btrfs_inode_sectorsize(inode)); start 4677 fs/btrfs/extent_io.c len = round_up(max, btrfs_inode_sectorsize(inode)) - start; start 4724 fs/btrfs/extent_io.c lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, start 4727 fs/btrfs/extent_io.c em = get_extent_skip_holes(inode, start, last_for_get_extent); start 4739 fs/btrfs/extent_io.c if (em->start >= max || extent_map_end(em) < off) start 4748 fs/btrfs/extent_io.c em_start = max(em->start, off); start 4757 fs/btrfs/extent_io.c offset_in_extent = em_start - em->start; start 4784 fs/btrfs/extent_io.c (em->start - em->orig_start); start 4838 fs/btrfs/extent_io.c unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, start 4920 fs/btrfs/extent_io.c __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, start 4926 fs/btrfs/extent_io.c eb->start = start; start 4967 fs/btrfs/extent_io.c new = __alloc_extent_buffer(src->fs_info, src->start, src->len); start 4991 fs/btrfs/extent_io.c u64 start, unsigned long len) start 4997 fs/btrfs/extent_io.c eb = __alloc_extent_buffer(fs_info, start, len); start 5020 fs/btrfs/extent_io.c u64 start) start 5022 fs/btrfs/extent_io.c return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize); start 5075 fs/btrfs/extent_io.c u64 start) start 5081 fs/btrfs/extent_io.c start >> PAGE_SHIFT); start 5113 fs/btrfs/extent_io.c u64 start) start 5118 fs/btrfs/extent_io.c eb = find_extent_buffer(fs_info, start); start 5121 fs/btrfs/extent_io.c eb = alloc_dummy_extent_buffer(fs_info, start); start 5133 fs/btrfs/extent_io.c start >> PAGE_SHIFT, eb); start 5137 fs/btrfs/extent_io.c exists = find_extent_buffer(fs_info, start); start 5154 fs/btrfs/extent_io.c u64 start) start 5159 fs/btrfs/extent_io.c unsigned long index = start >> PAGE_SHIFT; start 5167 fs/btrfs/extent_io.c if (!IS_ALIGNED(start, fs_info->sectorsize)) { start 5168 fs/btrfs/extent_io.c btrfs_err(fs_info, "bad tree block start %llu", start); start 5172 fs/btrfs/extent_io.c eb = find_extent_buffer(fs_info, start); start 5176 fs/btrfs/extent_io.c eb = __alloc_extent_buffer(fs_info, start, len); start 5241 fs/btrfs/extent_io.c start >> PAGE_SHIFT, eb); start 5245 fs/btrfs/extent_io.c exists = find_extent_buffer(fs_info, start); start 5296 fs/btrfs/extent_io.c eb->start >> PAGE_SHIFT); start 5556 fs/btrfs/extent_io.c unsigned long start, unsigned long len) start 5563 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5564 fs/btrfs/extent_io.c unsigned long i = (start_offset + start) >> PAGE_SHIFT; start 5566 fs/btrfs/extent_io.c if (start + len > eb->len) { start 5568 fs/btrfs/extent_io.c eb->start, eb->len, start, len); start 5573 fs/btrfs/extent_io.c offset = offset_in_page(start_offset + start); start 5591 fs/btrfs/extent_io.c 
unsigned long start, unsigned long len) start 5598 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5599 fs/btrfs/extent_io.c unsigned long i = (start_offset + start) >> PAGE_SHIFT; start 5602 fs/btrfs/extent_io.c WARN_ON(start > eb->len); start 5603 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len); start 5605 fs/btrfs/extent_io.c offset = offset_in_page(start_offset + start); start 5632 fs/btrfs/extent_io.c unsigned long start, unsigned long min_len, start 5639 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5640 fs/btrfs/extent_io.c unsigned long i = (start_offset + start) >> PAGE_SHIFT; start 5641 fs/btrfs/extent_io.c unsigned long end_i = (start_offset + start + min_len - 1) >> start 5644 fs/btrfs/extent_io.c if (start + min_len > eb->len) { start 5646 fs/btrfs/extent_io.c eb->start, eb->len, start, min_len); start 5669 fs/btrfs/extent_io.c unsigned long start, unsigned long len) start 5676 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5677 fs/btrfs/extent_io.c unsigned long i = (start_offset + start) >> PAGE_SHIFT; start 5680 fs/btrfs/extent_io.c WARN_ON(start > eb->len); start 5681 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len); start 5683 fs/btrfs/extent_io.c offset = offset_in_page(start_offset + start); start 5725 fs/btrfs/extent_io.c unsigned long start, unsigned long len) start 5732 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5733 fs/btrfs/extent_io.c unsigned long i = (start_offset + start) >> PAGE_SHIFT; start 5735 fs/btrfs/extent_io.c WARN_ON(start > eb->len); start 5736 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len); start 5738 fs/btrfs/extent_io.c offset = offset_in_page(start_offset + start); start 5755 fs/btrfs/extent_io.c void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, start 5762 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5763 fs/btrfs/extent_io.c unsigned long i = (start_offset + start) >> PAGE_SHIFT; start 5765 fs/btrfs/extent_io.c WARN_ON(start > eb->len); start 5766 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len); start 5768 fs/btrfs/extent_io.c offset = offset_in_page(start_offset + start); start 5807 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(dst->start); start 5844 fs/btrfs/extent_io.c unsigned long start, unsigned long nr, start 5848 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start); start 5857 fs/btrfs/extent_io.c offset = start_offset + start + byte_offset; start 5869 fs/btrfs/extent_io.c int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, start 5877 fs/btrfs/extent_io.c eb_bitmap_offset(eb, start, nr, &i, &offset); start 5891 fs/btrfs/extent_io.c void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, start 5902 fs/btrfs/extent_io.c eb_bitmap_offset(eb, start, pos, &i, &offset); start 5933 fs/btrfs/extent_io.c void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, start 5944 fs/btrfs/extent_io.c eb_bitmap_offset(eb, start, pos, &i, &offset); start 6002 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(dst->start); start 6049 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(dst->start); start 87 fs/btrfs/extent_io.h #define BITMAP_FIRST_BYTE_MASK(start) \ start 88 fs/btrfs/extent_io.h ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK) start 110 fs/btrfs/extent_io.h struct page *page, u64 start, u64 
end, start 140 fs/btrfs/extent_io.h u64 start; start 159 fs/btrfs/extent_io.h u64 start; start 268 fs/btrfs/extent_io.h int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, start 271 fs/btrfs/extent_io.h static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) start 273 fs/btrfs/extent_io.h return lock_extent_bits(tree, start, end, NULL); start 276 fs/btrfs/extent_io.h int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); start 283 fs/btrfs/extent_io.h u64 *start, u64 search_end, start 287 fs/btrfs/extent_io.h int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, start 290 fs/btrfs/extent_io.h int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, start 292 fs/btrfs/extent_io.h int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 295 fs/btrfs/extent_io.h int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 300 fs/btrfs/extent_io.h static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) start 302 fs/btrfs/extent_io.h return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL); start 305 fs/btrfs/extent_io.h static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start, start 308 fs/btrfs/extent_io.h return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, start 313 fs/btrfs/extent_io.h u64 start, u64 end, struct extent_state **cached) start 315 fs/btrfs/extent_io.h return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, start 319 fs/btrfs/extent_io.h static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, start 327 fs/btrfs/extent_io.h return clear_extent_bit(tree, start, end, bits, wake, 0, NULL); start 330 fs/btrfs/extent_io.h int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, start 332 fs/btrfs/extent_io.h int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 335 fs/btrfs/extent_io.h int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, start 338 fs/btrfs/extent_io.h static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, start 341 fs/btrfs/extent_io.h return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS); start 344 fs/btrfs/extent_io.h static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, start 347 fs/btrfs/extent_io.h return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, start 351 fs/btrfs/extent_io.h static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start, start 354 fs/btrfs/extent_io.h return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, start 358 fs/btrfs/extent_io.h static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, start 361 fs/btrfs/extent_io.h return clear_extent_bit(tree, start, end, start 366 fs/btrfs/extent_io.h int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, start 370 fs/btrfs/extent_io.h static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, start 374 fs/btrfs/extent_io.h return set_extent_bit(tree, start, end, start 379 fs/btrfs/extent_io.h static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, start 382 fs/btrfs/extent_io.h return set_extent_bit(tree, start, end, start 387 fs/btrfs/extent_io.h static inline int set_extent_new(struct extent_io_tree *tree, u64 start, start 390 fs/btrfs/extent_io.h return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, start 394 
fs/btrfs/extent_io.h static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, start 397 fs/btrfs/extent_io.h return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, start 401 fs/btrfs/extent_io.h int find_first_extent_bit(struct extent_io_tree *tree, u64 start, start 404 fs/btrfs/extent_io.h void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, start 409 fs/btrfs/extent_io.h int extent_write_locked_range(struct inode *inode, u64 start, u64 end, start 418 fs/btrfs/extent_io.h __u64 start, __u64 len); start 422 fs/btrfs/extent_io.h u64 start); start 424 fs/btrfs/extent_io.h u64 start, unsigned long len); start 426 fs/btrfs/extent_io.h u64 start); start 429 fs/btrfs/extent_io.h u64 start); start 441 fs/btrfs/extent_io.h return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) - start 442 fs/btrfs/extent_io.h (eb->start >> PAGE_SHIFT); start 456 fs/btrfs/extent_io.h unsigned long start, unsigned long len); start 458 fs/btrfs/extent_io.h unsigned long start, start 461 fs/btrfs/extent_io.h void __user *dst, unsigned long start, start 467 fs/btrfs/extent_io.h unsigned long start, unsigned long len); start 477 fs/btrfs/extent_io.h void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, start 479 fs/btrfs/extent_io.h int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, start 481 fs/btrfs/extent_io.h void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, start 483 fs/btrfs/extent_io.h void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, start 494 fs/btrfs/extent_io.h void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); start 495 fs/btrfs/extent_io.h void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); start 496 fs/btrfs/extent_io.h void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, start 508 fs/btrfs/extent_io.h int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, start 513 fs/btrfs/extent_io.h struct extent_io_tree *io_tree, u64 start, start 515 fs/btrfs/extent_io.h void end_extent_writepage(struct page *page, int err, u64 start, u64 end); start 528 fs/btrfs/extent_io.h u64 start; start 538 fs/btrfs/extent_io.h void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, start 540 fs/btrfs/extent_io.h int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, start 553 fs/btrfs/extent_io.h struct page *locked_page, u64 *start, start 557 fs/btrfs/extent_io.h u64 start); start 87 fs/btrfs/extent_map.c static u64 range_end(u64 start, u64 len) start 89 fs/btrfs/extent_map.c if (start + len < start) start 91 fs/btrfs/extent_map.c return start + len; start 100 fs/btrfs/extent_map.c u64 end = range_end(em->start, em->len); start 107 fs/btrfs/extent_map.c if (em->start < entry->start) { start 109 fs/btrfs/extent_map.c } else if (em->start >= extent_map_end(entry)) { start 118 fs/btrfs/extent_map.c while (parent && em->start >= extent_map_end(entry)) { start 123 fs/btrfs/extent_map.c if (end > entry->start && em->start < extent_map_end(entry)) start 128 fs/btrfs/extent_map.c while (parent && em->start < entry->start) { start 133 fs/btrfs/extent_map.c if (end > entry->start && em->start < extent_map_end(entry)) start 160 fs/btrfs/extent_map.c if (offset < entry->start) start 180 fs/btrfs/extent_map.c while (prev && offset < prev_entry->start) { start 217 fs/btrfs/extent_map.c if (extent_map_end(prev) == next->start && start 247 fs/btrfs/extent_map.c if 
(em->start != 0) { start 252 fs/btrfs/extent_map.c em->start = merge->start; start 292 fs/btrfs/extent_map.c int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, start 300 fs/btrfs/extent_map.c em = lookup_extent_mapping(tree, start, len); start 302 fs/btrfs/extent_map.c WARN_ON(!em || em->start != start); start 309 fs/btrfs/extent_map.c em->mod_start = em->start; start 320 fs/btrfs/extent_map.c em->mod_start = em->start; start 343 fs/btrfs/extent_map.c em->mod_start = em->start; start 415 fs/btrfs/extent_map.c u64 start, u64 len, int strict) start 421 fs/btrfs/extent_map.c u64 end = range_end(start, len); start 423 fs/btrfs/extent_map.c rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next); start 435 fs/btrfs/extent_map.c if (strict && !(end > em->start && start < extent_map_end(em))) start 454 fs/btrfs/extent_map.c u64 start, u64 len) start 456 fs/btrfs/extent_map.c return __lookup_extent_mapping(tree, start, len, 1); start 471 fs/btrfs/extent_map.c u64 start, u64 len) start 473 fs/btrfs/extent_map.c return __lookup_extent_mapping(tree, start, len, 0); start 543 fs/btrfs/extent_map.c u64 start; start 547 fs/btrfs/extent_map.c BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); start 549 fs/btrfs/extent_map.c if (existing->start > map_start) { start 557 fs/btrfs/extent_map.c start = prev ? extent_map_end(prev) : em->start; start 558 fs/btrfs/extent_map.c start = max_t(u64, start, em->start); start 559 fs/btrfs/extent_map.c end = next ? next->start : extent_map_end(em); start 561 fs/btrfs/extent_map.c start_diff = start - em->start; start 562 fs/btrfs/extent_map.c em->start = start; start 563 fs/btrfs/extent_map.c em->len = end - start; start 594 fs/btrfs/extent_map.c struct extent_map **em_in, u64 start, u64 len) start 609 fs/btrfs/extent_map.c existing = search_extent_mapping(em_tree, start, len); start 611 fs/btrfs/extent_map.c trace_btrfs_handle_em_exist(fs_info, existing, em, start, len); start 617 fs/btrfs/extent_map.c if (start >= existing->start && start 618 fs/btrfs/extent_map.c start < extent_map_end(existing)) { start 623 fs/btrfs/extent_map.c u64 orig_start = em->start; start 631 fs/btrfs/extent_map.c em, start); start 637 fs/btrfs/extent_map.c ret, existing->start, existing->len, start 34 fs/btrfs/extent_map.h u64 start; start 72 fs/btrfs/extent_map.h if (em->start + em->len < em->start) start 74 fs/btrfs/extent_map.h return em->start + em->len; start 86 fs/btrfs/extent_map.h u64 start, u64 len); start 99 fs/btrfs/extent_map.h int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen); start 102 fs/btrfs/extent_map.h u64 start, u64 len); start 105 fs/btrfs/extent_map.h struct extent_map **em_in, u64 start, u64 len); start 303 fs/btrfs/file-item.c int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, start 319 fs/btrfs/file-item.c ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && start 333 fs/btrfs/file-item.c key.offset = start; start 344 fs/btrfs/file-item.c offset = (start - key.offset) >> start 352 fs/btrfs/file-item.c while (start <= end) { start 369 fs/btrfs/file-item.c if (key.offset > start) start 370 fs/btrfs/file-item.c start = key.offset; start 374 fs/btrfs/file-item.c if (csum_end <= start) { start 382 fs/btrfs/file-item.c while (start < csum_end) { start 383 fs/btrfs/file-item.c size = min_t(size_t, csum_end - start, start 392 fs/btrfs/file-item.c sums->bytenr = start; start 395 fs/btrfs/file-item.c offset = (start - key.offset) >> start 405 fs/btrfs/file-item.c start += 
fs_info->sectorsize * size; start 970 fs/btrfs/file-item.c em->start = extent_start; start 994 fs/btrfs/file-item.c em->start = extent_start; start 310 fs/btrfs/file.c range.start = defrag->last_offset; start 322 fs/btrfs/file.c defrag->last_offset = range.start; start 467 fs/btrfs/file.c const u64 start, start 471 fs/btrfs/file.c u64 search_start = start; start 472 fs/btrfs/file.c const u64 end = start + len - 1; start 489 fs/btrfs/file.c if (em->start < search_start) start 490 fs/btrfs/file.c em_len -= search_start - em->start; start 587 fs/btrfs/file.c void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, start 594 fs/btrfs/file.c u64 len = end - start + 1; start 602 fs/btrfs/file.c WARN_ON(end < start); start 619 fs/btrfs/file.c em = lookup_extent_mapping(em_tree, start, len); start 627 fs/btrfs/file.c if (testend && em->start + em->len >= start + len) { start 632 fs/btrfs/file.c start = em->start + em->len; start 634 fs/btrfs/file.c len = start + len - (em->start + em->len); start 646 fs/btrfs/file.c if (em->start < start) { start 647 fs/btrfs/file.c split->start = em->start; start 648 fs/btrfs/file.c split->len = start - em->start; start 662 fs/btrfs/file.c split->orig_start = split->start; start 678 fs/btrfs/file.c if (testend && em->start + em->len > start + len) { start 679 fs/btrfs/file.c u64 diff = start + len - em->start; start 681 fs/btrfs/file.c split->start = start + len; start 682 fs/btrfs/file.c split->len = em->start + em->len - (start + len); start 705 fs/btrfs/file.c split->orig_start = split->start; start 749 fs/btrfs/file.c struct btrfs_path *path, u64 start, u64 end, start 762 fs/btrfs/file.c u64 search_start = start; start 767 fs/btrfs/file.c u64 last_end = start; start 779 fs/btrfs/file.c btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0); start 781 fs/btrfs/file.c if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) start 792 fs/btrfs/file.c if (ret > 0 && path->slots[0] > 0 && search_start == start) { start 869 fs/btrfs/file.c search_start = max(key.offset, start); start 880 fs/btrfs/file.c if (start > key.offset && end < extent_end) { start 888 fs/btrfs/file.c new_key.offset = start; start 902 fs/btrfs/file.c start - key.offset); start 907 fs/btrfs/file.c extent_offset += start - key.offset; start 910 fs/btrfs/file.c extent_end - start); start 920 fs/btrfs/file.c start - extent_offset); start 924 fs/btrfs/file.c key.offset = start; start 936 fs/btrfs/file.c if (start <= key.offset && end < extent_end) { start 961 fs/btrfs/file.c if (start > key.offset && end >= extent_end) { start 969 fs/btrfs/file.c start - key.offset); start 972 fs/btrfs/file.c inode_sub_bytes(inode, extent_end - start); start 984 fs/btrfs/file.c if (start <= key.offset && end >= extent_end) { start 1066 fs/btrfs/file.c key.offset = start; start 1090 fs/btrfs/file.c struct btrfs_root *root, struct inode *inode, u64 start, start 1099 fs/btrfs/file.c ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL, start 1107 fs/btrfs/file.c u64 *start, u64 *end) start 1130 fs/btrfs/file.c if ((*start && *start != key.offset) || (*end && *end != extent_end)) start 1133 fs/btrfs/file.c *start = key.offset; start 1146 fs/btrfs/file.c struct btrfs_inode *inode, u64 start, u64 end) start 1174 fs/btrfs/file.c split = start; start 1201 fs/btrfs/file.c if (key.offset > start || extent_end < end) { start 1212 fs/btrfs/file.c if (start == key.offset && end < extent_end) { start 1214 fs/btrfs/file.c other_end = start; start 1239 fs/btrfs/file.c if (start > 
key.offset && end == extent_end) { start 1248 fs/btrfs/file.c start - key.offset); start 1252 fs/btrfs/file.c new_key.offset = start; start 1260 fs/btrfs/file.c other_end - start); start 1262 fs/btrfs/file.c start - orig_offset); start 1268 fs/btrfs/file.c while (start > key.offset || end < extent_end) { start 1269 fs/btrfs/file.c if (key.offset == start) start 1309 fs/btrfs/file.c if (split == start) { start 1310 fs/btrfs/file.c key.offset = start; start 1312 fs/btrfs/file.c if (start != key.offset) { start 1345 fs/btrfs/file.c other_end = start; start 2031 fs/btrfs/file.c static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end) start 2044 fs/btrfs/file.c ret = btrfs_fdatawrite_range(inode, start, end); start 2062 fs/btrfs/file.c int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) start 2082 fs/btrfs/file.c start = 0; start 2092 fs/btrfs/file.c ret = start_ordered_ops(inode, start, end); start 2116 fs/btrfs/file.c start = 0; start 2138 fs/btrfs/file.c ret = start_ordered_ops(inode, start, end); start 2152 fs/btrfs/file.c ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1); start 2202 fs/btrfs/file.c ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx); start 2261 fs/btrfs/file.c int slot, u64 start, u64 end) start 2284 fs/btrfs/file.c if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start) start 2366 fs/btrfs/file.c hole_em->start = offset; start 2399 fs/btrfs/file.c static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len) start 2406 fs/btrfs/file.c round_down(*start, fs_info->sectorsize), start 2414 fs/btrfs/file.c *len = em->start + em->len > *start + *len ? start 2415 fs/btrfs/file.c 0 : *start + *len - em->start - em->len; start 2416 fs/btrfs/file.c *start = em->start + em->len; start 2530 fs/btrfs/file.c const u64 start, const u64 end, start 2543 fs/btrfs/file.c u64 len = end - start; start 2546 fs/btrfs/file.c if (end <= start) start 2580 fs/btrfs/file.c cur_offset = start; start 2898 fs/btrfs/file.c u64 start; start 2908 fs/btrfs/file.c static int add_falloc_range(struct list_head *head, u64 start, u64 len) start 2921 fs/btrfs/file.c if (prev->start + prev->len == start) { start 2929 fs/btrfs/file.c range->start = start; start 3022 fs/btrfs/file.c if (em->start <= alloc_start && start 3024 fs/btrfs/file.c const u64 em_end = em->start + em->len; start 3334 fs/btrfs/file.c range->start, start 3339 fs/btrfs/file.c data_reserved, range->start, start 3372 fs/btrfs/file.c u64 start; start 3383 fs/btrfs/file.c start = max_t(loff_t, 0, *offset); start 3385 fs/btrfs/file.c lockstart = round_down(start, fs_info->sectorsize); start 3396 fs/btrfs/file.c while (start < inode->i_size) { start 3397 fs/btrfs/file.c em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len); start 3413 fs/btrfs/file.c start = em->start + em->len; start 3420 fs/btrfs/file.c if (whence == SEEK_DATA && start >= inode->i_size) start 3423 fs/btrfs/file.c *offset = min_t(loff_t, start, inode->i_size); start 3501 fs/btrfs/file.c int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end) start 3519 fs/btrfs/file.c ret = filemap_fdatawrite_range(inode->i_mapping, start, end); start 3522 fs/btrfs/file.c ret = filemap_fdatawrite_range(inode->i_mapping, start, end); start 29 fs/btrfs/free-space-cache.c u64 start; start 982 fs/btrfs/free-space-cache.c ret = io_ctl_add_entry(io_ctl, trim_entry->start, start 1054 fs/btrfs/free-space-cache.c u64 start, extent_start, extent_end, len; start 1070 
fs/btrfs/free-space-cache.c start = block_group->key.objectid; start 1072 fs/btrfs/free-space-cache.c while (start < block_group->key.objectid + block_group->key.offset) { start 1073 fs/btrfs/free-space-cache.c ret = find_first_extent_bit(unpin, start, start 1084 fs/btrfs/free-space-cache.c extent_start = max(extent_start, start); start 1094 fs/btrfs/free-space-cache.c start = extent_end; start 1441 fs/btrfs/free-space-cache.c bitmap_start = offset - ctl->start; start 1444 fs/btrfs/free-space-cache.c bitmap_start += ctl->start; start 1705 fs/btrfs/free-space-cache.c unsigned long start, count; start 1707 fs/btrfs/free-space-cache.c start = offset_to_bit(info->offset, ctl->unit, offset); start 1709 fs/btrfs/free-space-cache.c ASSERT(start + count <= BITS_PER_BITMAP); start 1711 fs/btrfs/free-space-cache.c bitmap_clear(info->bitmap, start, count); start 1730 fs/btrfs/free-space-cache.c unsigned long start, count; start 1732 fs/btrfs/free-space-cache.c start = offset_to_bit(info->offset, ctl->unit, offset); start 1734 fs/btrfs/free-space-cache.c ASSERT(start + count <= BITS_PER_BITMAP); start 1736 fs/btrfs/free-space-cache.c bitmap_set(info->bitmap, start, count); start 1836 fs/btrfs/free-space-cache.c tmp = entry->offset - ctl->start + align - 1; start 1838 fs/btrfs/free-space-cache.c tmp = tmp * align + ctl->start; start 2519 fs/btrfs/free-space-cache.c ctl->start = block_group->key.objectid; start 2849 fs/btrfs/free-space-cache.c unsigned long start = 0; start 2887 fs/btrfs/free-space-cache.c start = i; start 2901 fs/btrfs/free-space-cache.c cluster->window_start = start * ctl->unit + entry->offset; start 3151 fs/btrfs/free-space-cache.c u64 *total_trimmed, u64 start, u64 bytes, start 3172 fs/btrfs/free-space-cache.c ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed); start 3196 fs/btrfs/free-space-cache.c u64 *total_trimmed, u64 start, u64 end, u64 minlen) start 3206 fs/btrfs/free-space-cache.c while (start < end) { start 3218 fs/btrfs/free-space-cache.c entry = tree_search_offset(ctl, start, 0, 1); start 3245 fs/btrfs/free-space-cache.c start = max(start, extent_start); start 3246 fs/btrfs/free-space-cache.c bytes = min(extent_start + extent_bytes, end) - start; start 3257 fs/btrfs/free-space-cache.c trim_entry.start = extent_start; start 3262 fs/btrfs/free-space-cache.c ret = do_trimming(block_group, total_trimmed, start, bytes, start 3267 fs/btrfs/free-space-cache.c start += bytes; start 3281 fs/btrfs/free-space-cache.c u64 *total_trimmed, u64 start, u64 end, u64 minlen) start 3288 fs/btrfs/free-space-cache.c u64 offset = offset_to_bitmap(ctl, start); start 3312 fs/btrfs/free-space-cache.c ret2 = search_bitmap(ctl, entry, &start, &bytes, false); start 3313 fs/btrfs/free-space-cache.c if (ret2 || start >= end) { start 3320 fs/btrfs/free-space-cache.c bytes = min(bytes, end - start); start 3327 fs/btrfs/free-space-cache.c bitmap_clear_bits(ctl, entry, start, bytes); start 3332 fs/btrfs/free-space-cache.c trim_entry.start = start; start 3337 fs/btrfs/free-space-cache.c ret = do_trimming(block_group, total_trimmed, start, bytes, start 3338 fs/btrfs/free-space-cache.c start, bytes, &trim_entry); start 3345 fs/btrfs/free-space-cache.c start += bytes; start 3346 fs/btrfs/free-space-cache.c if (start >= offset + BITS_PER_BITMAP * ctl->unit) start 3402 fs/btrfs/free-space-cache.c u64 *trimmed, u64 start, u64 end, u64 minlen) start 3416 fs/btrfs/free-space-cache.c ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); start 3420 fs/btrfs/free-space-cache.c ret = 
trim_bitmaps(block_group, trimmed, start, end, minlen); start 26 fs/btrfs/free-space-cache.h u64 start; start 112 fs/btrfs/free-space-cache.h u64 *trimmed, u64 start, u64 end, u64 minlen); start 161 fs/btrfs/free-space-tree.c static void le_bitmap_set(unsigned long *map, unsigned int start, int len) start 163 fs/btrfs/free-space-tree.c u8 *p = ((u8 *)map) + BIT_BYTE(start); start 164 fs/btrfs/free-space-tree.c const unsigned int size = start + len; start 165 fs/btrfs/free-space-tree.c int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE); start 166 fs/btrfs/free-space-tree.c u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start); start 193 fs/btrfs/free-space-tree.c u64 start, end; start 208 fs/btrfs/free-space-tree.c start = block_group->key.objectid; start 234 fs/btrfs/free-space-tree.c ASSERT(found_key.objectid >= start); start 238 fs/btrfs/free-space-tree.c first = div_u64(found_key.objectid - start, start 240 fs/btrfs/free-space-tree.c last = div_u64(found_key.objectid + found_key.offset - start, start 283 fs/btrfs/free-space-tree.c i = start; start 332 fs/btrfs/free-space-tree.c u64 start, end; start 347 fs/btrfs/free-space-tree.c start = block_group->key.objectid; start 375 fs/btrfs/free-space-tree.c ASSERT(found_key.objectid >= start); start 379 fs/btrfs/free-space-tree.c bitmap_pos = div_u64(found_key.objectid - start, start 423 fs/btrfs/free-space-tree.c key.objectid = start + start_bit * block_group->fs_info->sectorsize; start 517 fs/btrfs/free-space-tree.c struct btrfs_path *path, u64 *start, u64 *size, start 523 fs/btrfs/free-space-tree.c u64 end = *start + *size; start 533 fs/btrfs/free-space-tree.c ASSERT(*start >= found_start && *start < found_end); start 540 fs/btrfs/free-space-tree.c first = div_u64(*start - found_start, fs_info->sectorsize); start 548 fs/btrfs/free-space-tree.c *size -= end - *start; start 549 fs/btrfs/free-space-tree.c *start = end; start 586 fs/btrfs/free-space-tree.c u64 start, u64 size, int remove) start 590 fs/btrfs/free-space-tree.c u64 end = start + size; start 600 fs/btrfs/free-space-tree.c if (start > block_group->key.objectid) { start 601 fs/btrfs/free-space-tree.c u64 prev_block = start - block_group->fs_info->sectorsize; start 615 fs/btrfs/free-space-tree.c if (start >= key.objectid + key.offset) { start 621 fs/btrfs/free-space-tree.c key.objectid = start; start 636 fs/btrfs/free-space-tree.c cur_start = start; start 699 fs/btrfs/free-space-tree.c u64 start, u64 size) start 704 fs/btrfs/free-space-tree.c u64 end = start + size; start 708 fs/btrfs/free-space-tree.c key.objectid = start; start 722 fs/btrfs/free-space-tree.c ASSERT(start >= found_start && end <= found_end); start 749 fs/btrfs/free-space-tree.c if (start > found_start) { start 752 fs/btrfs/free-space-tree.c key.offset = start - found_start; start 785 fs/btrfs/free-space-tree.c struct btrfs_path *path, u64 start, u64 size) start 805 fs/btrfs/free-space-tree.c start, size, 1); start 808 fs/btrfs/free-space-tree.c start, size); start 813 fs/btrfs/free-space-tree.c u64 start, u64 size) start 828 fs/btrfs/free-space-tree.c block_group = btrfs_lookup_block_group(trans->fs_info, start); start 836 fs/btrfs/free-space-tree.c ret = __remove_from_free_space_tree(trans, block_group, path, start, start 851 fs/btrfs/free-space-tree.c u64 start, u64 size) start 856 fs/btrfs/free-space-tree.c u64 end = start + size; start 878 fs/btrfs/free-space-tree.c new_key.objectid = start; start 883 fs/btrfs/free-space-tree.c if (start == block_group->key.objectid) start 885 fs/btrfs/free-space-tree.c 
key.objectid = start - 1; start 905 fs/btrfs/free-space-tree.c ASSERT(found_start < start && found_end <= start); start 911 fs/btrfs/free-space-tree.c if (found_end == start) { start 945 fs/btrfs/free-space-tree.c ASSERT((found_start < start && found_end <= start) || start 978 fs/btrfs/free-space-tree.c struct btrfs_path *path, u64 start, u64 size) start 998 fs/btrfs/free-space-tree.c start, size, 0); start 1000 fs/btrfs/free-space-tree.c return add_free_space_extent(trans, block_group, path, start, start 1006 fs/btrfs/free-space-tree.c u64 start, u64 size) start 1021 fs/btrfs/free-space-tree.c block_group = btrfs_lookup_block_group(trans->fs_info, start); start 1029 fs/btrfs/free-space-tree.c ret = __add_to_free_space_tree(trans, block_group, path, start, size); start 1051 fs/btrfs/free-space-tree.c u64 start, end; start 1087 fs/btrfs/free-space-tree.c start = block_group->key.objectid; start 1097 fs/btrfs/free-space-tree.c if (start < key.objectid) { start 1100 fs/btrfs/free-space-tree.c path2, start, start 1102 fs/btrfs/free-space-tree.c start); start 1106 fs/btrfs/free-space-tree.c start = key.objectid; start 1108 fs/btrfs/free-space-tree.c start += trans->fs_info->nodesize; start 1110 fs/btrfs/free-space-tree.c start += key.offset; start 1122 fs/btrfs/free-space-tree.c if (start < end) { start 1124 fs/btrfs/free-space-tree.c start, end - start); start 1321 fs/btrfs/free-space-tree.c u64 start, end; start 1339 fs/btrfs/free-space-tree.c start = block_group->key.objectid; start 1366 fs/btrfs/free-space-tree.c ASSERT(found_key.objectid >= start); start 28 fs/btrfs/free-space-tree.h u64 start, u64 size); start 30 fs/btrfs/free-space-tree.h u64 start, u64 size); start 39 fs/btrfs/free-space-tree.h struct btrfs_path *path, u64 start, u64 size); start 42 fs/btrfs/free-space-tree.h struct btrfs_path *path, u64 start, u64 size); start 375 fs/btrfs/inode-map.c ctl->start = 0; start 390 fs/btrfs/inode-map.c pinned->start = 0; start 85 fs/btrfs/inode.c u64 start, u64 end, int *page_started, start 87 fs/btrfs/inode.c static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, start 169 fs/btrfs/inode.c u64 start, size_t size, size_t compressed_size, start 195 fs/btrfs/inode.c key.offset = start; start 235 fs/btrfs/inode.c start >> PAGE_SHIFT); start 238 fs/btrfs/inode.c offset = offset_in_page(start); start 268 fs/btrfs/inode.c static noinline int cow_file_range_inline(struct inode *inode, u64 start, start 278 fs/btrfs/inode.c u64 inline_len = actual_end - start; start 289 fs/btrfs/inode.c if (start > 0 || start 318 fs/btrfs/inode.c start, aligned_end, NULL, start 328 fs/btrfs/inode.c root, inode, start, start 340 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); start 355 fs/btrfs/inode.c u64 start; start 367 fs/btrfs/inode.c u64 start; start 382 fs/btrfs/inode.c u64 start, u64 ram_size, start 392 fs/btrfs/inode.c async_extent->start = start; start 417 fs/btrfs/inode.c static inline int inode_need_compress(struct inode *inode, u64 start, u64 end) start 439 fs/btrfs/inode.c return btrfs_compress_heuristic(inode, start, end); start 444 fs/btrfs/inode.c u64 start, u64 end, u64 num_bytes, u64 small_write) start 448 fs/btrfs/inode.c (start > 0 || end + 1 < inode->disk_i_size)) start 474 fs/btrfs/inode.c u64 start = async_chunk->start; start 489 fs/btrfs/inode.c inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, start 507 fs/btrfs/inode.c nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; start 522 fs/btrfs/inode.c if 
(actual_end <= start) start 525 fs/btrfs/inode.c total_compressed = actual_end - start; start 532 fs/btrfs/inode.c (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) start 545 fs/btrfs/inode.c if (inode_need_compress(inode, start, end)) { start 572 fs/btrfs/inode.c extent_range_clear_dirty_for_io(inode, start, end); start 579 fs/btrfs/inode.c inode->i_mapping, start, start 603 fs/btrfs/inode.c if (start == 0) { start 609 fs/btrfs/inode.c ret = cow_file_range_inline(inode, start, end, 0, start 613 fs/btrfs/inode.c ret = cow_file_range_inline(inode, start, end, start 635 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, end, NULL, start 675 fs/btrfs/inode.c add_async_extent(async_chunk, start, total_in, start 679 fs/btrfs/inode.c if (start + total_in < end) { start 680 fs/btrfs/inode.c start += total_in; start 716 fs/btrfs/inode.c (page_offset(async_chunk->locked_page) >= start && start 723 fs/btrfs/inode.c extent_range_redirty_for_io(inode, start, end); start 724 fs/btrfs/inode.c add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, start 772 fs/btrfs/inode.c lock_extent(io_tree, async_extent->start, start 773 fs/btrfs/inode.c async_extent->start + async_extent->ram_size - 1); start 781 fs/btrfs/inode.c async_extent->start, start 782 fs/btrfs/inode.c async_extent->start + start 796 fs/btrfs/inode.c async_extent->start, start 797 fs/btrfs/inode.c async_extent->start + start 815 fs/btrfs/inode.c unlock_extent(io_tree, async_extent->start, start 816 fs/btrfs/inode.c async_extent->start + start 826 fs/btrfs/inode.c async_extent->start, start 827 fs/btrfs/inode.c async_extent->start + start 838 fs/btrfs/inode.c em = create_io_em(inode, async_extent->start, start 840 fs/btrfs/inode.c async_extent->start, /* orig_start */ start 853 fs/btrfs/inode.c async_extent->start, start 861 fs/btrfs/inode.c async_extent->start, start 862 fs/btrfs/inode.c async_extent->start + start 871 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, async_extent->start, start 872 fs/btrfs/inode.c async_extent->start + start 878 fs/btrfs/inode.c async_extent->start, start 885 fs/btrfs/inode.c const u64 start = async_extent->start; start 886 fs/btrfs/inode.c const u64 end = start + async_extent->ram_size - 1; start 889 fs/btrfs/inode.c btrfs_writepage_endio_finish_ordered(p, start, end, 0); start 892 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, end, start 907 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, async_extent->start, start 908 fs/btrfs/inode.c async_extent->start + start 921 fs/btrfs/inode.c static u64 get_extent_allocation_hint(struct inode *inode, u64 start, start 929 fs/btrfs/inode.c em = search_extent_mapping(em_tree, start, num_bytes); start 968 fs/btrfs/inode.c u64 start, u64 end, int *page_started, start 991 fs/btrfs/inode.c num_bytes = ALIGN(end - start + 1, blocksize); start 995 fs/btrfs/inode.c inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); start 997 fs/btrfs/inode.c if (start == 0) { start 999 fs/btrfs/inode.c ret = cow_file_range_inline(inode, start, end, 0, start 1008 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, end, NULL, start 1015 fs/btrfs/inode.c (end - start + PAGE_SIZE) / PAGE_SIZE; start 1023 fs/btrfs/inode.c alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); start 1024 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), start, start 1025 fs/btrfs/inode.c start + num_bytes - 1, 0); start 1038 fs/btrfs/inode.c em = create_io_em(inode, start, ins.offset, /* len */ start 1039 fs/btrfs/inode.c 
start, /* orig_start */ start 1052 fs/btrfs/inode.c ret = btrfs_add_ordered_extent(inode, start, ins.objectid, start 1059 fs/btrfs/inode.c ret = btrfs_reloc_clone_csums(inode, start, start 1073 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), start, start 1074 fs/btrfs/inode.c start + ram_size - 1, 0); start 1089 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, start 1090 fs/btrfs/inode.c start + ram_size - 1, start 1099 fs/btrfs/inode.c start += cur_alloc_size; start 1114 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); start 1134 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, start 1135 fs/btrfs/inode.c start + cur_alloc_size, start 1139 fs/btrfs/inode.c start += cur_alloc_size; start 1140 fs/btrfs/inode.c if (start >= end) start 1143 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, end, locked_page, start 1176 fs/btrfs/inode.c nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> start 1210 fs/btrfs/inode.c u64 start, u64 end, int *page_started, start 1219 fs/btrfs/inode.c u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); start 1224 fs/btrfs/inode.c unlock_extent(&BTRFS_I(inode)->io_tree, start, end); start 1246 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, end, locked_page, start 1256 fs/btrfs/inode.c cur_end = min(end, start + SZ_512K - 1); start 1267 fs/btrfs/inode.c async_chunk[i].start = start; start 1291 fs/btrfs/inode.c nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); start 1297 fs/btrfs/inode.c start = cur_end + 1; start 1334 fs/btrfs/inode.c const u64 start, const u64 end, start 1342 fs/btrfs/inode.c u64 cur_offset = start; start 1352 fs/btrfs/inode.c extent_clear_unlock_delalloc(inode, start, end, locked_page, start 1541 fs/btrfs/inode.c if (extent_end <= start) { start 1685 fs/btrfs/inode.c static inline int need_force_cow(struct inode *inode, u64 start, u64 end) start 1698 fs/btrfs/inode.c test_range_bit(&BTRFS_I(inode)->io_tree, start, end, start 1710 fs/btrfs/inode.c u64 start, u64 end, int *page_started, unsigned long *nr_written, start 1714 fs/btrfs/inode.c int force_cow = need_force_cow(inode, start, end); start 1718 fs/btrfs/inode.c ret = run_delalloc_nocow(inode, locked_page, start, end, start 1721 fs/btrfs/inode.c ret = run_delalloc_nocow(inode, locked_page, start, end, start 1724 fs/btrfs/inode.c !inode_need_compress(inode, start, end)) { start 1725 fs/btrfs/inode.c ret = cow_file_range(inode, locked_page, start, end, start 1730 fs/btrfs/inode.c ret = cow_file_range_async(inode, locked_page, start, end, start 1735 fs/btrfs/inode.c btrfs_cleanup_ordered_extents(inode, locked_page, start, start 1736 fs/btrfs/inode.c end - start + 1); start 1749 fs/btrfs/inode.c size = orig->end - orig->start + 1; start 1760 fs/btrfs/inode.c new_size = split - orig->start; start 1786 fs/btrfs/inode.c if (new->start > other->start) start 1787 fs/btrfs/inode.c new_size = new->end - other->start + 1; start 1789 fs/btrfs/inode.c new_size = other->end - new->start + 1; start 1817 fs/btrfs/inode.c old_size = other->end - other->start + 1; start 1819 fs/btrfs/inode.c old_size = new->end - new->start + 1; start 1899 fs/btrfs/inode.c u64 len = state->end + 1 - state->start; start 1927 fs/btrfs/inode.c state->start; start 1941 fs/btrfs/inode.c u64 len = state->end + 1 - state->start; start 1981 fs/btrfs/inode.c state->start, len); start 2159 fs/btrfs/inode.c int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, start 2164 fs/btrfs/inode.c return 
set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, start 2318 fs/btrfs/inode.c int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) start 2730 fs/btrfs/inode.c u64 start; start 2814 fs/btrfs/inode.c start = backref->file_pos; start 2816 fs/btrfs/inode.c start += old->extent_offset + old->offset - start 2823 fs/btrfs/inode.c ret = btrfs_drop_extents(trans, root, inode, start, start 2824 fs/btrfs/inode.c start + len, 1); start 2830 fs/btrfs/inode.c key.offset = start; start 2850 fs/btrfs/inode.c if (extent_len + found_key.offset == start && start 2878 fs/btrfs/inode.c btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); start 2993 fs/btrfs/inode.c new->bytenr = ordered->start; start 3089 fs/btrfs/inode.c u64 start, u64 len) start 3093 fs/btrfs/inode.c cache = btrfs_lookup_block_group(fs_info, start); start 3222 fs/btrfs/inode.c ordered_extent->start, start 3230 fs/btrfs/inode.c ordered_extent->start, start 3276 fs/btrfs/inode.c u64 start, end; start 3279 fs/btrfs/inode.c start = ordered_extent->file_offset + logical_len; start 3281 fs/btrfs/inode.c start = ordered_extent->file_offset; start 3283 fs/btrfs/inode.c clear_extent_uptodate(io_tree, start, end, NULL); start 3286 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); start 3303 fs/btrfs/inode.c ordered_extent->start, start 3339 fs/btrfs/inode.c void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, start 3347 fs/btrfs/inode.c trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); start 3350 fs/btrfs/inode.c if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, start 3351 fs/btrfs/inode.c end - start + 1, uptodate)) start 3366 fs/btrfs/inode.c int pgoff, u64 start, size_t len) start 3390 fs/btrfs/inode.c btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, start 3405 fs/btrfs/inode.c u64 start, u64 end, int mirror) start 3407 fs/btrfs/inode.c size_t offset = start - page_offset(page); start 3421 fs/btrfs/inode.c test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { start 3422 fs/btrfs/inode.c clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM); start 3428 fs/btrfs/inode.c start, (size_t)(end - start + 1)); start 5257 fs/btrfs/inode.c hole_em->start = cur_offset; start 5471 fs/btrfs/inode.c u64 start; start 5477 fs/btrfs/inode.c start = state->start; start 5482 fs/btrfs/inode.c lock_extent_bits(io_tree, start, end, &cached_state); start 5493 fs/btrfs/inode.c btrfs_qgroup_free_data(inode, NULL, start, end - start + 1); start 5495 fs/btrfs/inode.c clear_extent_bit(io_tree, start, end, start 7019 fs/btrfs/inode.c size_t pg_offset, u64 start, u64 len, start 7040 fs/btrfs/inode.c em = lookup_extent_mapping(em_tree, start, len); start 7046 fs/btrfs/inode.c if (em->start > start || em->start + em->len <= start) start 7059 fs/btrfs/inode.c em->start = EXTENT_MAP_HOLE; start 7079 fs/btrfs/inode.c ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); start 7101 fs/btrfs/inode.c extent_end = start; start 7134 fs/btrfs/inode.c if (start >= extent_end) { start 7150 fs/btrfs/inode.c if (start + len <= found_key.offset) start 7152 fs/btrfs/inode.c if (start > found_key.offset) start 7156 fs/btrfs/inode.c em->start = start; start 7157 fs/btrfs/inode.c em->orig_start = start; start 7158 fs/btrfs/inode.c em->len = found_key.offset - start; start 7183 fs/btrfs/inode.c em->start = extent_start + extent_offset; start 7186 fs/btrfs/inode.c em->orig_start = em->start; start 7212 fs/btrfs/inode.c 
set_extent_uptodate(io_tree, em->start, start 7217 fs/btrfs/inode.c em->start = start; start 7218 fs/btrfs/inode.c em->orig_start = start; start 7223 fs/btrfs/inode.c if (em->start > start || extent_map_end(em) <= start) { start 7226 fs/btrfs/inode.c em->start, em->len, start, len); start 7233 fs/btrfs/inode.c err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); start 7249 fs/btrfs/inode.c u64 start, u64 len) start 7253 fs/btrfs/inode.c u64 delalloc_start = start; start 7259 fs/btrfs/inode.c em = btrfs_get_extent(inode, NULL, 0, start, len, 0); start 7275 fs/btrfs/inode.c end = start + len; start 7276 fs/btrfs/inode.c if (end < start) start 7294 fs/btrfs/inode.c if (delalloc_start > end || delalloc_end <= start) { start 7304 fs/btrfs/inode.c delalloc_start = max(start, delalloc_start); start 7327 fs/btrfs/inode.c if (hole_end <= start || hole_em->start > end) { start 7331 fs/btrfs/inode.c hole_start = max(hole_em->start, start); start 7342 fs/btrfs/inode.c em->start = hole_start; start 7357 fs/btrfs/inode.c em->start = delalloc_start; start 7377 fs/btrfs/inode.c const u64 start, start 7390 fs/btrfs/inode.c em = create_io_em(inode, start, len, orig_start, start 7398 fs/btrfs/inode.c ret = btrfs_add_ordered_extent_dio(inode, start, block_start, start 7403 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), start, start 7404 fs/btrfs/inode.c start + len - 1, 0); start 7414 fs/btrfs/inode.c u64 start, u64 len) start 7423 fs/btrfs/inode.c alloc_hint = get_extent_allocation_hint(inode, start, len); start 7429 fs/btrfs/inode.c em = btrfs_create_dio_extent(inode, start, ins.offset, start, start 7669 fs/btrfs/inode.c static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, start 7690 fs/btrfs/inode.c em->start = start; start 7708 fs/btrfs/inode.c btrfs_drop_extent_cache(BTRFS_I(inode), em->start, start 7709 fs/btrfs/inode.c em->start + em->len - 1, 0); start 7732 fs/btrfs/inode.c u64 start, u64 len) start 7738 fs/btrfs/inode.c len = min(len, em->len - (start - em->start)); start 7740 fs/btrfs/inode.c bh_result->b_blocknr = (em->block_start + (start - em->start)) >> start 7753 fs/btrfs/inode.c u64 start, u64 len) start 7778 fs/btrfs/inode.c len = min(len, em->len - (start - em->start)); start 7779 fs/btrfs/inode.c block_start = em->block_start + (start - em->start); start 7781 fs/btrfs/inode.c if (can_nocow_extent(inode, start, &len, &orig_start, start 7786 fs/btrfs/inode.c em2 = btrfs_create_dio_extent(inode, start, len, start 7805 fs/btrfs/inode.c btrfs_free_reserved_data_space_noquota(inode, start, start 7814 fs/btrfs/inode.c *map = em = btrfs_new_extent_direct(inode, start, len); start 7820 fs/btrfs/inode.c len = min(len, em->len - (start - em->start)); start 7823 fs/btrfs/inode.c bh_result->b_blocknr = (em->block_start + (start - em->start)) >> start 7836 fs/btrfs/inode.c if (!dio_data->overwrite && start + len > i_size_read(inode)) start 7837 fs/btrfs/inode.c i_size_write(inode, start + len); start 7841 fs/btrfs/inode.c dio_data->unsubmitted_oe_range_end = start + len; start 7854 fs/btrfs/inode.c u64 start = iblock << inode->i_blkbits; start 7862 fs/btrfs/inode.c lockstart = start; start 7863 fs/btrfs/inode.c lockend = start + len - 1; start 7885 fs/btrfs/inode.c em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); start 7914 fs/btrfs/inode.c dio_data, start, len); start 7922 fs/btrfs/inode.c start, len); start 7933 fs/btrfs/inode.c lockstart = start + bh_result->b_size; start 8011 fs/btrfs/inode.c u64 start, u64 end, int failed_mirror, start 
8027 fs/btrfs/inode.c ret = btrfs_get_io_failure_record(inode, start, end, &failrec); start 8044 fs/btrfs/inode.c isector = start - btrfs_io_bio(failed_bio)->logical; start 8066 fs/btrfs/inode.c u64 start; start 8090 fs/btrfs/inode.c io_tree, done->start, bvec->bv_page, start 8104 fs/btrfs/inode.c u64 start; start 8114 fs/btrfs/inode.c start = io_bio->logical; start 8124 fs/btrfs/inode.c done.start = start; start 8128 fs/btrfs/inode.c pgoff, start, start + sectorsize - 1, start 8144 fs/btrfs/inode.c start += sectorsize; start 8183 fs/btrfs/inode.c bvec->bv_offset, done->start, start 8187 fs/btrfs/inode.c failure_tree, io_tree, done->start, start 8209 fs/btrfs/inode.c u64 start; start 8223 fs/btrfs/inode.c start = io_bio->logical; start 8235 fs/btrfs/inode.c bvec.bv_page, pgoff, start, sectorsize); start 8241 fs/btrfs/inode.c done.start = start; start 8245 fs/btrfs/inode.c pgoff, start, start + sectorsize - 1, start 8261 fs/btrfs/inode.c start += sectorsize; start 8844 fs/btrfs/inode.c __u64 start, __u64 len) start 8852 fs/btrfs/inode.c return extent_fiemap(inode, fieinfo, start, len); start 8927 fs/btrfs/inode.c u64 start; start 8949 fs/btrfs/inode.c start = page_start; start 8950 fs/btrfs/inode.c ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, start 8951 fs/btrfs/inode.c page_end - start + 1); start 8959 fs/btrfs/inode.c clear_extent_bit(tree, start, end, start 8975 fs/btrfs/inode.c new_len = start - ordered->file_offset; start 8981 fs/btrfs/inode.c start, start 8982 fs/btrfs/inode.c end - start + 1, 1)) start 8988 fs/btrfs/inode.c lock_extent_bits(tree, start, end, start 8992 fs/btrfs/inode.c start = end + 1; start 8993 fs/btrfs/inode.c if (start < page_end) start 10469 fs/btrfs/inode.c u64 start, u64 num_bytes, u64 min_size, start 10478 fs/btrfs/inode.c u64 cur_offset = start; start 10479 fs/btrfs/inode.c u64 clear_offset = start; start 10485 fs/btrfs/inode.c u64 end = start + num_bytes - 1; start 10550 fs/btrfs/inode.c em->start = cur_offset; start 10610 fs/btrfs/inode.c u64 start, u64 num_bytes, u64 min_size, start 10613 fs/btrfs/inode.c return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, start 10620 fs/btrfs/inode.c u64 start, u64 num_bytes, u64 min_size, start 10623 fs/btrfs/inode.c return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, start 10712 fs/btrfs/inode.c void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) start 10715 fs/btrfs/inode.c unsigned long index = start >> PAGE_SHIFT; start 10796 fs/btrfs/inode.c u64 start; start 10821 fs/btrfs/inode.c if (bsi->start == 0) start 10858 fs/btrfs/inode.c u64 start; start 10910 fs/btrfs/inode.c start = 0; start 10911 fs/btrfs/inode.c while (start < isize) { start 10914 fs/btrfs/inode.c u64 len = isize - start; start 10916 fs/btrfs/inode.c em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); start 10945 fs/btrfs/inode.c logical_block_start = em->block_start + (start - em->start); start 10946 fs/btrfs/inode.c len = min(len, em->len - (start - em->start)); start 10950 fs/btrfs/inode.c ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL); start 10989 fs/btrfs/inode.c (logical_block_start - em->start)); start 10990 fs/btrfs/inode.c len = min(len, em->len - (logical_block_start - em->start)); start 11020 fs/btrfs/inode.c bsi.start = start; start 11025 fs/btrfs/inode.c start += len; start 638 fs/btrfs/ioctl.c btrfs_set_root_bytenr(root_item, leaf->start); start 1111 fs/btrfs/ioctl.c static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) 
start 1123 fs/btrfs/ioctl.c em = lookup_extent_mapping(em_tree, start, len); start 1128 fs/btrfs/ioctl.c u64 end = start + len - 1; start 1131 fs/btrfs/ioctl.c lock_extent_bits(io_tree, start, end, &cached); start 1132 fs/btrfs/ioctl.c em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); start 1133 fs/btrfs/ioctl.c unlock_extent_cached(io_tree, start, end, &cached); start 1148 fs/btrfs/ioctl.c if (em->start + em->len >= i_size_read(inode)) start 1151 fs/btrfs/ioctl.c next = defrag_lookup_extent(inode, em->start + em->len); start 1162 fs/btrfs/ioctl.c static int should_defrag_range(struct inode *inode, u64 start, u32 thresh, start 1175 fs/btrfs/ioctl.c if (start < *defrag_end) start 1180 fs/btrfs/ioctl.c em = defrag_lookup_extent(inode, start); start 1398 fs/btrfs/ioctl.c u64 newer_off = range->start; start 1414 fs/btrfs/ioctl.c if (range->start >= isize) start 1447 fs/btrfs/ioctl.c if (range->start + range->len > range->start) { start 1449 fs/btrfs/ioctl.c range->start + range->len - 1) >> PAGE_SHIFT; start 1458 fs/btrfs/ioctl.c range->start = newer_off; start 1467 fs/btrfs/ioctl.c i = range->start >> PAGE_SHIFT; start 1553 fs/btrfs/ioctl.c range->start = newer_off; start 3469 fs/btrfs/ioctl.c const u32 start = btrfs_file_extent_calc_inline_size(0); start 3471 fs/btrfs/ioctl.c memmove(inline_data + start, inline_data + start + skip, datal); start 4254 fs/btrfs/ioctl.c ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end, start 136 fs/btrfs/lzo.c u64 start, start 166 fs/btrfs/lzo.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); start 273 fs/btrfs/lzo.c start += PAGE_SIZE; start 274 fs/btrfs/lzo.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); start 329 fs/btrfs/lzo.c u64 disk_start = cb->start; start 175 fs/btrfs/ordered-data.c u64 start, u64 len, u64 disk_len, start 190 fs/btrfs/ordered-data.c entry->start = start; start 250 fs/btrfs/ordered-data.c u64 start, u64 len, u64 disk_len, int type) start 252 fs/btrfs/ordered-data.c return __btrfs_add_ordered_extent(inode, file_offset, start, len, start 258 fs/btrfs/ordered-data.c u64 start, u64 len, u64 disk_len, int type) start 260 fs/btrfs/ordered-data.c return __btrfs_add_ordered_extent(inode, file_offset, start, len, start 266 fs/btrfs/ordered-data.c u64 start, u64 len, u64 disk_len, start 269 fs/btrfs/ordered-data.c return __btrfs_add_ordered_extent(inode, file_offset, start, len, start 537 fs/btrfs/ordered-data.c if (range_end <= ordered->start || start 538 fs/btrfs/ordered-data.c ordered->start + ordered->disk_len <= range_start) { start 625 fs/btrfs/ordered-data.c u64 start = entry->file_offset; start 626 fs/btrfs/ordered-data.c u64 end = start + entry->len - 1; start 636 fs/btrfs/ordered-data.c filemap_fdatawrite_range(inode->i_mapping, start, end); start 646 fs/btrfs/ordered-data.c int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) start 654 fs/btrfs/ordered-data.c if (start + len < start) { start 657 fs/btrfs/ordered-data.c orig_end = start + len - 1; start 665 fs/btrfs/ordered-data.c ret = btrfs_fdatawrite_range(inode, start, orig_end); start 676 fs/btrfs/ordered-data.c ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end); start 687 fs/btrfs/ordered-data.c if (ordered->file_offset + ordered->len <= start) { start 701 fs/btrfs/ordered-data.c if (end == 0 || end == start) start 988 fs/btrfs/ordered-data.c struct btrfs_inode *inode, u64 start, start 1000 fs/btrfs/ordered-data.c lock_extent_bits(tree, start, end, cachedp); start 1001 fs/btrfs/ordered-data.c ordered = 
btrfs_lookup_ordered_range(inode, start, start 1002 fs/btrfs/ordered-data.c end - start + 1); start 1013 fs/btrfs/ordered-data.c unlock_extent_cached(tree, start, end, cachedp); start 71 fs/btrfs/ordered-data.h u64 start; start 164 fs/btrfs/ordered-data.h u64 start, u64 len, u64 disk_len, int type); start 166 fs/btrfs/ordered-data.h u64 start, u64 len, u64 disk_len, int type); start 168 fs/btrfs/ordered-data.h u64 start, u64 len, u64 disk_len, start 176 fs/btrfs/ordered-data.h int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); start 192 fs/btrfs/ordered-data.h struct btrfs_inode *inode, u64 start, start 120 fs/btrfs/print-tree.c eb->start, type); start 1880 fs/btrfs/qgroup.c ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, start 1885 fs/btrfs/qgroup.c dst_path->nodes[dst_level]->start, start 3408 fs/btrfs/qgroup.c struct extent_changeset **reserved_ret, u64 start, start 3434 fs/btrfs/qgroup.c ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, start 3435 fs/btrfs/qgroup.c start + len -1, EXTENT_QGROUP_RESERVED, reserved); start 3439 fs/btrfs/qgroup.c trace_btrfs_qgroup_reserve_data(inode, start, len, start 3464 fs/btrfs/qgroup.c struct extent_changeset *reserved, u64 start, u64 len) start 3474 fs/btrfs/qgroup.c len = round_up(start + len, root->fs_info->sectorsize); start 3475 fs/btrfs/qgroup.c start = round_down(start, root->fs_info->sectorsize); start 3488 fs/btrfs/qgroup.c if (range_start >= start + len || start 3489 fs/btrfs/qgroup.c range_start + range_len <= start) start 3491 fs/btrfs/qgroup.c free_start = max(range_start, start); start 3492 fs/btrfs/qgroup.c free_len = min(start + len, range_start + range_len) - start 3518 fs/btrfs/qgroup.c struct extent_changeset *reserved, u64 start, u64 len, start 3532 fs/btrfs/qgroup.c return qgroup_free_reserved_data(inode, reserved, start, len); start 3534 fs/btrfs/qgroup.c ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, start 3535 fs/btrfs/qgroup.c start + len -1, EXTENT_QGROUP_RESERVED, &changeset); start 3541 fs/btrfs/qgroup.c trace_btrfs_qgroup_release_data(inode, start, len, start 3566 fs/btrfs/qgroup.c struct extent_changeset *reserved, u64 start, u64 len) start 3568 fs/btrfs/qgroup.c return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); start 3586 fs/btrfs/qgroup.c int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) start 3588 fs/btrfs/qgroup.c return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); start 3976 fs/btrfs/qgroup.c if (block->subvol_bytenr < subvol_eb->start) { start 3978 fs/btrfs/qgroup.c } else if (block->subvol_bytenr > subvol_eb->start) { start 4023 fs/btrfs/qgroup.c subvol_eb->start, ret); start 348 fs/btrfs/qgroup.h struct extent_changeset **reserved, u64 start, u64 len); start 349 fs/btrfs/qgroup.h int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len); start 351 fs/btrfs/qgroup.h struct extent_changeset *reserved, u64 start, u64 len); start 1161 fs/btrfs/raid56.c u64 start; start 1171 fs/btrfs/raid56.c start = (u64)bio->bi_iter.bi_sector << 9; start 1172 fs/btrfs/raid56.c stripe_offset = start - rbio->bbio->raid_map[0]; start 65 fs/btrfs/reada.c u64 start; start 207 fs/btrfs/reada.c eb->start >> PAGE_SHIFT); start 231 fs/btrfs/reada.c u64 start; start 239 fs/btrfs/reada.c if (ret == 1 && logical >= zone->start && logical <= zone->end) { start 251 fs/btrfs/reada.c start = cache->key.objectid; start 252 fs/btrfs/reada.c end = start + cache->key.offset - 1; start 265 fs/btrfs/reada.c 
zone->start = start; start 288 fs/btrfs/reada.c if (ret == 1 && logical >= zone->start && logical <= zone->end) start 635 fs/btrfs/reada.c dev->reada_next = dev->reada_curr_zone->start; start 852 fs/btrfs/reada.c zone->start, zone->end, zone->elems, start 860 fs/btrfs/reada.c device->reada_next - zone->start); start 879 fs/btrfs/reada.c re->zones[i]->start, start 911 fs/btrfs/reada.c re->zones[i]->start, start 932 fs/btrfs/reada.c u64 start; start 955 fs/btrfs/reada.c start = node->start; start 959 fs/btrfs/reada.c ret = reada_add_block(rc, start, &max_key, generation); start 912 fs/btrfs/ref-verify.c void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start, start 925 fs/btrfs/ref-verify.c if (entry->bytenr < start) { start 927 fs/btrfs/ref-verify.c } else if (entry->bytenr > start) { start 935 fs/btrfs/ref-verify.c (entry->bytenr < start && be->bytenr > start) || start 936 fs/btrfs/ref-verify.c (entry->bytenr < start && entry->bytenr > be->bytenr)) start 953 fs/btrfs/ref-verify.c if (be->bytenr < start && be->bytenr + be->len > start) { start 956 fs/btrfs/ref-verify.c start, len); start 960 fs/btrfs/ref-verify.c if (be->bytenr < start) start 962 fs/btrfs/ref-verify.c if (be->bytenr >= start + len) start 964 fs/btrfs/ref-verify.c if (be->bytenr + be->len > start + len) { start 967 fs/btrfs/ref-verify.c start, len); start 14 fs/btrfs/ref-verify.h void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start, start 39 fs/btrfs/ref-verify.h u64 start, u64 len) start 142 fs/btrfs/relocation.c u64 start; start 958 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, eb->start); start 966 fs/btrfs/relocation.c upper->bytenr = eb->start; start 1217 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, src->commit_root->start); start 1223 fs/btrfs/relocation.c BUG_ON(node->new_bytenr != reloc_root->node->start); start 1228 fs/btrfs/relocation.c reloc_root->commit_root->start); start 1243 fs/btrfs/relocation.c new_node->bytenr = dest->node->start; start 1301 fs/btrfs/relocation.c node->bytenr = root->commit_root->start; start 1332 fs/btrfs/relocation.c root->commit_root->start); start 1363 fs/btrfs/relocation.c root->commit_root->start); start 1375 fs/btrfs/relocation.c node->bytenr = root->node->start; start 1432 fs/btrfs/relocation.c btrfs_set_root_bytenr(root_item, eb->start); start 1687 fs/btrfs/relocation.c parent = leaf->start; start 1975 fs/btrfs/relocation.c blocksize, path->nodes[level]->start); start 1988 fs/btrfs/relocation.c blocksize, path->nodes[level]->start); start 2115 fs/btrfs/relocation.c u64 start, end; start 2144 fs/btrfs/relocation.c start = 0; start 2146 fs/btrfs/relocation.c start = min_key->offset; start 2147 fs/btrfs/relocation.c WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); start 2150 fs/btrfs/relocation.c start = 0; start 2170 fs/btrfs/relocation.c lock_extent(&BTRFS_I(inode)->io_tree, start, end); start 2171 fs/btrfs/relocation.c btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); start 2172 fs/btrfs/relocation.c unlock_extent(&BTRFS_I(inode)->io_tree, start, end); start 2637 fs/btrfs/relocation.c if (next->new_bytenr != root->node->start) { start 2640 fs/btrfs/relocation.c next->new_bytenr = root->node->start; start 2835 fs/btrfs/relocation.c if (node->eb->start == bytenr) start 2881 fs/btrfs/relocation.c upper->eb->start); start 2886 fs/btrfs/relocation.c if (node->eb->start == bytenr) start 2918 fs/btrfs/relocation.c node->eb->start); start 2924 fs/btrfs/relocation.c node->eb->start, blocksize, start 2925 
fs/btrfs/relocation.c upper->eb->start); start 3112 fs/btrfs/relocation.c node->new_bytenr = root->node->start; start 3200 fs/btrfs/relocation.c u64 start; start 3206 fs/btrfs/relocation.c u64 prealloc_start = cluster->start - offset; start 3211 fs/btrfs/relocation.c BUG_ON(cluster->start != cluster->boundary[0]); start 3221 fs/btrfs/relocation.c start = cluster->boundary[nr] - offset; start 3227 fs/btrfs/relocation.c lock_extent(&BTRFS_I(inode)->io_tree, start, end); start 3228 fs/btrfs/relocation.c num_bytes = end + 1 - start; start 3229 fs/btrfs/relocation.c if (cur_offset < start) start 3231 fs/btrfs/relocation.c cur_offset, start - cur_offset); start 3232 fs/btrfs/relocation.c ret = btrfs_prealloc_file_range(inode, 0, start, start 3236 fs/btrfs/relocation.c unlock_extent(&BTRFS_I(inode)->io_tree, start, end); start 3251 fs/btrfs/relocation.c int setup_extent_mapping(struct inode *inode, u64 start, u64 end, start 3263 fs/btrfs/relocation.c em->start = start; start 3264 fs/btrfs/relocation.c em->len = end + 1 - start; start 3270 fs/btrfs/relocation.c lock_extent(&BTRFS_I(inode)->io_tree, start, end); start 3279 fs/btrfs/relocation.c btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); start 3281 fs/btrfs/relocation.c unlock_extent(&BTRFS_I(inode)->io_tree, start, end); start 3313 fs/btrfs/relocation.c ret = setup_extent_mapping(inode, cluster->start - offset, start 3314 fs/btrfs/relocation.c cluster->end - offset, cluster->start); start 3318 fs/btrfs/relocation.c index = (cluster->start - offset) >> PAGE_SHIFT; start 3427 fs/btrfs/relocation.c cluster->start = extent_key->objectid; start 3591 fs/btrfs/relocation.c eb->start, btrfs_header_level(eb), 1, start 3724 fs/btrfs/relocation.c rb_node = tree_search(blocks, leaf->start); start 3750 fs/btrfs/relocation.c rb_node = tree_search(blocks, leaf->start); start 3784 fs/btrfs/relocation.c if (!tree_block_processed(leaf->start, rc)) { start 3790 fs/btrfs/relocation.c block->bytenr = leaf->start; start 3853 fs/btrfs/relocation.c eb->start, path->slots[0]); start 3919 fs/btrfs/relocation.c u64 start, end, last; start 3975 fs/btrfs/relocation.c key.objectid, &start, &end, start 3978 fs/btrfs/relocation.c if (ret == 0 && start <= key.objectid) { start 4696 fs/btrfs/relocation.c new_bytenr = ordered->start + (sums->bytenr - disk_bytenr); start 4734 fs/btrfs/relocation.c BUG_ON(node->bytenr != buf->start && start 4735 fs/btrfs/relocation.c node->new_bytenr != buf->start); start 4740 fs/btrfs/relocation.c node->new_bytenr = cow->start; start 112 fs/btrfs/root-tree.c btrfs_set_root_bytenr(item, node->start); start 2366 fs/btrfs/scrub.c u64 start, u64 len) start 2378 fs/btrfs/scrub.c start -= sparity->logic_start; start 2379 fs/btrfs/scrub.c start = div64_u64_rem(start, sparity->stripe_len, &offset); start 2396 fs/btrfs/scrub.c u64 start, u64 len) start 2398 fs/btrfs/scrub.c __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); start 2402 fs/btrfs/scrub.c u64 start, u64 len) start 2404 fs/btrfs/scrub.c __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); start 2426 fs/btrfs/scrub.c u64 start = sblock->pagev[0]->logical; start 2431 fs/btrfs/scrub.c start, end - start); start 3447 fs/btrfs/scrub.c if (em->start != chunk_offset) start 3470 fs/btrfs/scrub.c struct btrfs_device *scrub_dev, u64 start, u64 end) start 3538 fs/btrfs/scrub.c if (found_key.offset + length <= start) start 3791 fs/btrfs/scrub.c int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, start 3928 fs/btrfs/scrub.c ret = scrub_enumerate_chunks(sctx, 
dev, start, end); start 45 fs/btrfs/send.c char *start; start 337 fs/btrfs/send.c p->start = p->buf + p->buf_len - 1; start 338 fs/btrfs/send.c p->end = p->start; start 339 fs/btrfs/send.c *p->start = 0; start 341 fs/btrfs/send.c p->start = p->buf; start 342 fs/btrfs/send.c p->end = p->start; start 343 fs/btrfs/send.c *p->start = 0; start 384 fs/btrfs/send.c return p->end - p->start; start 403 fs/btrfs/send.c path_len = p->end - p->start; start 428 fs/btrfs/send.c p->start = p->end - path_len; start 429 fs/btrfs/send.c memmove(p->start, tmp_buf, path_len + 1); start 431 fs/btrfs/send.c p->start = p->buf; start 432 fs/btrfs/send.c p->end = p->start + path_len; start 443 fs/btrfs/send.c new_len = p->end - p->start + name_len; start 444 fs/btrfs/send.c if (p->start != p->end) start 451 fs/btrfs/send.c if (p->start != p->end) start 452 fs/btrfs/send.c *--p->start = '/'; start 453 fs/btrfs/send.c p->start -= name_len; start 454 fs/btrfs/send.c *prepared = p->start; start 456 fs/btrfs/send.c if (p->start != p->end) start 486 fs/btrfs/send.c ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared); start 489 fs/btrfs/send.c memcpy(prepared, p2->start, p2->end - p2->start); start 533 fs/btrfs/send.c tmp = p->start; start 534 fs/btrfs/send.c len = p->end - p->start; start 535 fs/btrfs/send.c p->start = p->buf; start 536 fs/btrfs/send.c p->end = p->start + len; start 537 fs/btrfs/send.c memmove(p->start, tmp, len + 1); start 654 fs/btrfs/send.c ret = tlv_put_string(sctx, attrtype, p->start, \ start 655 fs/btrfs/send.c p->end - p->start); \ start 734 fs/btrfs/send.c btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start); start 759 fs/btrfs/send.c btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start); start 783 fs/btrfs/send.c btrfs_debug(fs_info, "send_unlink %s", path->start); start 806 fs/btrfs/send.c btrfs_debug(fs_info, "send_rmdir %s", path->start); start 903 fs/btrfs/send.c char *start; start 953 fs/btrfs/send.c start = btrfs_ref_to_path(root, tmp_path, name_len, start 956 fs/btrfs/send.c if (IS_ERR(start)) { start 957 fs/btrfs/send.c ret = PTR_ERR(start); start 960 fs/btrfs/send.c if (start < p->buf) { start 963 fs/btrfs/send.c p->buf_len + p->buf - start); start 966 fs/btrfs/send.c start = btrfs_ref_to_path(root, tmp_path, start 970 fs/btrfs/send.c if (IS_ERR(start)) { start 971 fs/btrfs/send.c ret = PTR_ERR(start); start 974 fs/btrfs/send.c BUG_ON(start < p->buf); start 976 fs/btrfs/send.c p->start = start; start 1825 fs/btrfs/send.c ret = !memcmp(tmp_name->start, name, name_len); start 2006 fs/btrfs/send.c name->start, fs_path_len(name)); start 2208 fs/btrfs/send.c dest->start, dest->end - dest->start); start 2235 fs/btrfs/send.c strcpy(nce->name, dest->start); start 2773 fs/btrfs/send.c ref->name = (char *)kbasename(ref->full_path->start); start 3739 fs/btrfs/send.c memcmp(path_before->start, path_after->start, len1))) { start 4303 fs/btrfs/send.c strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { start 5621 fs/btrfs/send.c const u64 start, start 5627 fs/btrfs/send.c u64 search_start = start; start 5678 fs/btrfs/send.c if (extent_end <= start) start 19 fs/btrfs/tests/extent-io-tests.c static noinline int process_page_range(struct inode *inode, u64 start, u64 end, start 24 fs/btrfs/tests/extent-io-tests.c unsigned long index = start >> PAGE_SHIFT; start 52 fs/btrfs/tests/extent-io-tests.c start, end, nr_pages, ret); start 69 fs/btrfs/tests/extent-io-tests.c u64 start, end, test_start; start 114 fs/btrfs/tests/extent-io-tests.c start = 0; 
start 116 fs/btrfs/tests/extent-io-tests.c found = find_lock_delalloc_range(inode, locked_page, &start, start 122 fs/btrfs/tests/extent-io-tests.c if (start != 0 || end != (sectorsize - 1)) { start 124 fs/btrfs/tests/extent-io-tests.c sectorsize - 1, start, end); start 127 fs/btrfs/tests/extent-io-tests.c unlock_extent(tmp, start, end); start 145 fs/btrfs/tests/extent-io-tests.c start = test_start; start 147 fs/btrfs/tests/extent-io-tests.c found = find_lock_delalloc_range(inode, locked_page, &start, start 153 fs/btrfs/tests/extent-io-tests.c if (start != test_start || end != max_bytes - 1) { start 155 fs/btrfs/tests/extent-io-tests.c test_start, max_bytes - 1, start, end); start 158 fs/btrfs/tests/extent-io-tests.c if (process_page_range(inode, start, end, start 163 fs/btrfs/tests/extent-io-tests.c unlock_extent(tmp, start, end); start 179 fs/btrfs/tests/extent-io-tests.c start = test_start; start 181 fs/btrfs/tests/extent-io-tests.c found = find_lock_delalloc_range(inode, locked_page, &start, start 200 fs/btrfs/tests/extent-io-tests.c start = test_start; start 202 fs/btrfs/tests/extent-io-tests.c found = find_lock_delalloc_range(inode, locked_page, &start, start 208 fs/btrfs/tests/extent-io-tests.c if (start != test_start || end != total_dirty - 1) { start 210 fs/btrfs/tests/extent-io-tests.c test_start, total_dirty - 1, start, end); start 213 fs/btrfs/tests/extent-io-tests.c if (process_page_range(inode, start, end, start 218 fs/btrfs/tests/extent-io-tests.c unlock_extent(tmp, start, end); start 235 fs/btrfs/tests/extent-io-tests.c start = test_start; start 243 fs/btrfs/tests/extent-io-tests.c found = find_lock_delalloc_range(inode, locked_page, &start, start 249 fs/btrfs/tests/extent-io-tests.c if (start != test_start && end != test_start + PAGE_SIZE - 1) { start 251 fs/btrfs/tests/extent-io-tests.c test_start, test_start + PAGE_SIZE - 1, start, end); start 254 fs/btrfs/tests/extent-io-tests.c if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED | start 440 fs/btrfs/tests/extent-io-tests.c u64 start, end; start 448 fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED); start 449 fs/btrfs/tests/extent-io-tests.c if (start != 0 || end != -1) { start 452 fs/btrfs/tests/extent-io-tests.c start, end); start 462 fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, SZ_512K, &start, &end, start 465 fs/btrfs/tests/extent-io-tests.c if (start != 0 || end != SZ_1M - 1) { start 467 fs/btrfs/tests/extent-io-tests.c start, end); start 478 fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end, start 481 fs/btrfs/tests/extent-io-tests.c if (start != SZ_4M || end != SZ_32M - 1) { start 483 fs/btrfs/tests/extent-io-tests.c start, end); start 491 fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, SZ_2M, &start, &end, start 494 fs/btrfs/tests/extent-io-tests.c if (start != SZ_4M || end != SZ_32M - 1) { start 496 fs/btrfs/tests/extent-io-tests.c start, end); start 505 fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end, start 508 fs/btrfs/tests/extent-io-tests.c if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { start 510 fs/btrfs/tests/extent-io-tests.c start, end); start 514 fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end, start 521 fs/btrfs/tests/extent-io-tests.c if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { start 523 fs/btrfs/tests/extent-io-tests.c start, end); start 531 
fs/btrfs/tests/extent-io-tests.c find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED); start 532 fs/btrfs/tests/extent-io-tests.c if (start != SZ_64M + SZ_8M || end != -1) { start 535 fs/btrfs/tests/extent-io-tests.c start, end); start 24 fs/btrfs/tests/extent-map-tests.c em->start, em->len, em->block_start, start 54 fs/btrfs/tests/extent-map-tests.c u64 start = 0; start 65 fs/btrfs/tests/extent-map-tests.c em->start = 0; start 86 fs/btrfs/tests/extent-map-tests.c em->start = SZ_16K; start 107 fs/btrfs/tests/extent-map-tests.c em->start = start; start 109 fs/btrfs/tests/extent-map-tests.c em->block_start = start; start 112 fs/btrfs/tests/extent-map-tests.c ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len); start 115 fs/btrfs/tests/extent-map-tests.c test_err("case1 [%llu %llu]: ret %d", start, start + len, ret); start 119 fs/btrfs/tests/extent-map-tests.c (em->start != 0 || extent_map_end(em) != SZ_16K || start 123 fs/btrfs/tests/extent-map-tests.c start, start + len, ret, em->start, em->len, start 153 fs/btrfs/tests/extent-map-tests.c em->start = 0; start 174 fs/btrfs/tests/extent-map-tests.c em->start = SZ_4K; start 195 fs/btrfs/tests/extent-map-tests.c em->start = 0; start 200 fs/btrfs/tests/extent-map-tests.c ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len); start 207 fs/btrfs/tests/extent-map-tests.c (em->start != 0 || extent_map_end(em) != SZ_1K || start 211 fs/btrfs/tests/extent-map-tests.c ret, em->start, em->len, em->block_start, start 223 fs/btrfs/tests/extent-map-tests.c struct extent_map_tree *em_tree, u64 start) start 236 fs/btrfs/tests/extent-map-tests.c em->start = SZ_4K; start 257 fs/btrfs/tests/extent-map-tests.c em->start = 0; start 262 fs/btrfs/tests/extent-map-tests.c ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); start 266 fs/btrfs/tests/extent-map-tests.c start, start + len, ret); start 274 fs/btrfs/tests/extent-map-tests.c (start < em->start || start + len > extent_map_end(em) || start 275 fs/btrfs/tests/extent-map-tests.c em->start != em->block_start || em->len != em->block_len)) { start 278 fs/btrfs/tests/extent-map-tests.c start, start + len, ret, em->start, em->len, start 322 fs/btrfs/tests/extent-map-tests.c struct extent_map_tree *em_tree, u64 start) start 335 fs/btrfs/tests/extent-map-tests.c em->start = 0; start 356 fs/btrfs/tests/extent-map-tests.c em->start = SZ_8K; start 376 fs/btrfs/tests/extent-map-tests.c em->start = 0; start 381 fs/btrfs/tests/extent-map-tests.c ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); start 385 fs/btrfs/tests/extent-map-tests.c start, len, ret); start 388 fs/btrfs/tests/extent-map-tests.c if (em && (start < em->start || start + len > extent_map_end(em))) { start 391 fs/btrfs/tests/extent-map-tests.c start, len, ret, em->start, em->len, em->block_start, start 15 fs/btrfs/tests/free-space-tree-tests.c u64 start; start 66 fs/btrfs/tests/free-space-tree-tests.c extent_start != extents[i].start || start 77 fs/btrfs/tests/free-space-tree-tests.c extent_start != extents[i].start || start 92 fs/btrfs/tests/free-space-tree-tests.c key.objectid != extents[i].start || start 15 fs/btrfs/tests/inode-tests.c static void insert_extent(struct btrfs_root *root, u64 start, u64 len, start 34 fs/btrfs/tests/inode-tests.c key.offset = start; start 295 fs/btrfs/tests/inode-tests.c if (em->start != 0 || em->len != 5) { start 298 fs/btrfs/tests/inode-tests.c em->start, em->len); start 305 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; 
start 318 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != (sectorsize - 5)) { start 321 fs/btrfs/tests/inode-tests.c offset, em->start, em->len); start 333 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 345 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != 4) { start 348 fs/btrfs/tests/inode-tests.c offset, em->start, em->len); start 355 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 368 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize - 1) { start 371 fs/btrfs/tests/inode-tests.c offset, em->start, em->len); start 378 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 379 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 383 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 396 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 399 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 406 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 407 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 412 fs/btrfs/tests/inode-tests.c orig_start = em->start; start 413 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 425 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 428 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 435 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 447 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != 2 * sectorsize) { start 450 fs/btrfs/tests/inode-tests.c offset, 2 * sectorsize, em->start, em->len); start 462 fs/btrfs/tests/inode-tests.c disk_bytenr += (em->start - orig_start); start 468 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 481 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 484 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 492 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 493 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 497 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 510 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 513 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 521 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 522 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 527 fs/btrfs/tests/inode-tests.c orig_start = em->start; start 528 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 540 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 543 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 555 fs/btrfs/tests/inode-tests.c if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) { start 557 fs/btrfs/tests/inode-tests.c disk_bytenr + (em->start - em->orig_start), start 561 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 573 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != 2 * sectorsize) { start 576 fs/btrfs/tests/inode-tests.c offset, 2 * sectorsize, em->start, em->len); start 589 fs/btrfs/tests/inode-tests.c if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) { start 591 
fs/btrfs/tests/inode-tests.c disk_bytenr + (em->start - em->orig_start), start 595 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 608 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != 2 * sectorsize) { start 611 fs/btrfs/tests/inode-tests.c offset, 2 * sectorsize, em->start, em->len); start 619 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 621 fs/btrfs/tests/inode-tests.c em->start, em->orig_start); start 629 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 642 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 645 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 653 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 655 fs/btrfs/tests/inode-tests.c em->start, em->orig_start); start 664 fs/btrfs/tests/inode-tests.c orig_start = em->start; start 665 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 677 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 680 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 687 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 688 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 692 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 705 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != 2 * sectorsize) { start 708 fs/btrfs/tests/inode-tests.c offset, 2 * sectorsize, em->start, em->len); start 718 fs/btrfs/tests/inode-tests.c em->start, orig_start); start 726 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 740 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 743 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 750 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 751 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 755 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 772 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != 3 * sectorsize) { start 775 fs/btrfs/tests/inode-tests.c offset, 3 * sectorsize, em->start, em->len); start 783 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 784 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 788 fs/btrfs/tests/inode-tests.c offset = em->start + em->len; start 800 fs/btrfs/tests/inode-tests.c if (em->start != offset || em->len != sectorsize) { start 803 fs/btrfs/tests/inode-tests.c offset, sectorsize, em->start, em->len); start 810 fs/btrfs/tests/inode-tests.c if (em->orig_start != em->start) { start 811 fs/btrfs/tests/inode-tests.c test_err("wrong orig offset, want %llu, have %llu", em->start, start 884 fs/btrfs/tests/inode-tests.c if (em->start != 0 || em->len != sectorsize) { start 887 fs/btrfs/tests/inode-tests.c sectorsize, em->start, em->len); start 907 fs/btrfs/tests/inode-tests.c if (em->start != sectorsize || em->len != sectorsize) { start 910 fs/btrfs/tests/inode-tests.c sectorsize, sectorsize, em->start, em->len); start 919 fs/btrfs/transaction.c u64 start = 0; start 923 fs/btrfs/transaction.c while (!find_first_extent_bit(dirty_pages, start, &start, &end, start 927 fs/btrfs/transaction.c err = convert_extent_bit(dirty_pages, start, end, start 948 fs/btrfs/transaction.c err = filemap_fdatawrite_range(mapping, start, end); start 952 
fs/btrfs/transaction.c werr = filemap_fdatawait_range(mapping, start, end); start 956 fs/btrfs/transaction.c start = end + 1; start 975 fs/btrfs/transaction.c u64 start = 0; start 978 fs/btrfs/transaction.c while (!find_first_extent_bit(dirty_pages, start, &start, &end, start 988 fs/btrfs/transaction.c err = clear_extent_bit(dirty_pages, start, end, start 993 fs/btrfs/transaction.c err = filemap_fdatawait_range(mapping, start, end); start 999 fs/btrfs/transaction.c start = end + 1; start 1097 fs/btrfs/transaction.c if (old_root_bytenr == root->node->start && start 532 fs/btrfs/tree-checker.c is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET); start 558 fs/btrfs/tree-checker.c BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot, start 946 fs/btrfs/tree-checker.c eb->start, slot, bytenr, len, &vaf); start 1464 fs/btrfs/tree-checker.c btrfs_header_owner(node), node->start, start 97 fs/btrfs/tree-log.c const loff_t start, start 231 fs/btrfs/tree-log.c return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, start 232 fs/btrfs/tree-log.c buf->start + buf->len - 1); start 238 fs/btrfs/tree-log.c buf->start, buf->start + buf->len - 1); start 314 fs/btrfs/tree-log.c ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start, start 589 fs/btrfs/tree-log.c u64 start = key->offset; start 602 fs/btrfs/tree-log.c extent_end = start + nbytes; start 613 fs/btrfs/tree-log.c extent_end = ALIGN(start + size, start 632 fs/btrfs/tree-log.c btrfs_ino(BTRFS_I(inode)), start, 0); start 663 fs/btrfs/tree-log.c ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); start 2838 fs/btrfs/tree-log.c path->nodes[*level]->start, start 2920 fs/btrfs/tree-log.c next->start, next->len); start 3242 fs/btrfs/tree-log.c log_root_tree->node->start); start 4090 fs/btrfs/tree-log.c if (em1->start < em2->start) start 4092 fs/btrfs/tree-log.c else if (em1->start > em2->start) start 4117 fs/btrfs/tree-log.c csum_offset = em->mod_start - em->start; start 4153 fs/btrfs/tree-log.c u64 extent_offset = em->start - em->orig_start; start 4162 fs/btrfs/tree-log.c ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, start 4163 fs/btrfs/tree-log.c em->start + em->len, NULL, 0, 1, start 4171 fs/btrfs/tree-log.c key.offset = em->start; start 4372 fs/btrfs/tree-log.c const u64 start, start 4401 fs/btrfs/tree-log.c if ((em->start > end || em->start + em->len <= start) && start 4423 fs/btrfs/tree-log.c em->start >= i_size_read(&inode->vfs_inode)) start 5027 fs/btrfs/tree-log.c const loff_t start, start 5334 fs/btrfs/tree-log.c ctx, start, end); start 5365 fs/btrfs/tree-log.c if (em->mod_start >= start && mod_end <= end) start 5989 fs/btrfs/tree-log.c const loff_t start, start 6043 fs/btrfs/tree-log.c ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); start 6139 fs/btrfs/tree-log.c const loff_t start, start 6147 fs/btrfs/tree-log.c start, end, LOG_INODE_ALL, ctx); start 6247 fs/btrfs/tree-log.c log->node->start, start 52 fs/btrfs/tree-log.h const loff_t start, start 1548 fs/btrfs/volumes.c static bool contains_pending_extent(struct btrfs_device *device, u64 *start, start 1555 fs/btrfs/volumes.c if (!find_first_extent_bit(&device->alloc_state, *start, start 1559 fs/btrfs/volumes.c if (in_range(physical_start, *start, len) || start 1560 fs/btrfs/volumes.c in_range(*start, physical_start, start 1562 fs/btrfs/volumes.c *start = physical_end + 1; start 1598 fs/btrfs/volumes.c u64 num_bytes, u64 search_start, u64 *start, start 1748 fs/btrfs/volumes.c *start = max_hole_start; start 1755 
fs/btrfs/volumes.c u64 *start, u64 *len) start 1758 fs/btrfs/volumes.c return find_free_dev_extent_start(device, num_bytes, 0, start, len); start 1763 fs/btrfs/volumes.c u64 start, u64 *dev_extent_len) start 1779 fs/btrfs/volumes.c key.offset = start; start 1792 fs/btrfs/volumes.c BUG_ON(found_key.offset > start || found_key.offset + start 1793 fs/btrfs/volumes.c btrfs_dev_extent_length(leaf, extent) < start); start 1822 fs/btrfs/volumes.c u64 chunk_offset, u64 start, u64 num_bytes) start 1839 fs/btrfs/volumes.c key.offset = start; start 1874 fs/btrfs/volumes.c ret = em->start + em->len; start 3000 fs/btrfs/volumes.c if (em->start > logical || em->start + em->len < logical) { start 3003 fs/btrfs/volumes.c logical, length, em->start, em->start + em->len); start 4734 fs/btrfs/volumes.c u64 start; start 4737 fs/btrfs/volumes.c start = new_size; start 4768 fs/btrfs/volumes.c if (contains_pending_extent(device, &start, diff)) { start 4955 fs/btrfs/volumes.c u64 start, u64 type) start 5174 fs/btrfs/volumes.c trace_btrfs_chunk_alloc(info, map, start, chunk_size); start 5184 fs/btrfs/volumes.c em->start = start; start 5200 fs/btrfs/volumes.c ret = btrfs_make_block_group(trans, 0, type, start, chunk_size); start 5672 fs/btrfs/volumes.c offset = logical - em->start; start 5992 fs/btrfs/volumes.c offset = logical - em->start; start 6002 fs/btrfs/volumes.c stripe_offset, offset, em->start, logical, stripe_len); start 6256 fs/btrfs/volumes.c em->start + (tmp + i) * map->stripe_len; start 6787 fs/btrfs/volumes.c if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { start 6798 fs/btrfs/volumes.c if (em && em->start <= logical && em->start + em->len > logical) { start 6816 fs/btrfs/volumes.c em->start = logical; start 6870 fs/btrfs/volumes.c em->start, em->len, ret); start 7221 fs/btrfs/volumes.c em->start, missing, max_tolerated); start 7731 fs/btrfs/volumes.c physical_offset, devid, em->start, physical_len, start 7744 fs/btrfs/volumes.c em->start); start 7806 fs/btrfs/volumes.c em->start, em->map_lookup->verified_stripes, start 477 fs/btrfs/volumes.h u64 *start, u64 *max_avail); start 93 fs/btrfs/zlib.c u64 start, start 124 fs/btrfs/zlib.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); start 193 fs/btrfs/zlib.c start += PAGE_SIZE; start 195 fs/btrfs/zlib.c start >> PAGE_SHIFT); start 243 fs/btrfs/zlib.c u64 disk_start = cb->start; start 372 fs/btrfs/zstd.c u64 start, start 406 fs/btrfs/zstd.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); start 484 fs/btrfs/zstd.c start += PAGE_SIZE; start 486 fs/btrfs/zstd.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); start 555 fs/btrfs/zstd.c u64 disk_start = cb->start; start 1866 fs/buffer.c unsigned start, size; start 1868 fs/buffer.c start = max(from, block_start); start 1869 fs/buffer.c size = min(to, block_end) - start; start 1871 fs/buffer.c zero_user(page, start, size); start 2105 fs/buffer.c unsigned start; start 2107 fs/buffer.c start = pos & (PAGE_SIZE - 1); start 2125 fs/buffer.c page_zero_new_buffers(page, start+copied, start+len); start 2130 fs/buffer.c __block_commit_write(inode, page, start, start+copied); start 491 fs/cachefiles/namei.c unsigned long start; start 530 fs/cachefiles/namei.c start = jiffies; start 532 fs/cachefiles/namei.c cachefiles_hist(cachefiles_lookup_histogram, start); start 563 fs/cachefiles/namei.c start = jiffies; start 565 fs/cachefiles/namei.c cachefiles_hist(cachefiles_mkdir_histogram, start); start 599 fs/cachefiles/namei.c start = jiffies; start 601 fs/cachefiles/namei.c 
cachefiles_hist(cachefiles_create_histogram, start); start 759 fs/cachefiles/namei.c unsigned long start; start 769 fs/cachefiles/namei.c start = jiffies; start 771 fs/cachefiles/namei.c cachefiles_hist(cachefiles_lookup_histogram, start); start 870 fs/cachefiles/namei.c unsigned long start; start 879 fs/cachefiles/namei.c start = jiffies; start 881 fs/cachefiles/namei.c cachefiles_hist(cachefiles_lookup_histogram, start); start 77 fs/cachefiles/proc.c .start = cachefiles_histogram_start, start 547 fs/ceph/addr.c struct page *page, u64 start) start 570 fs/ceph/addr.c return end > start ? end - start : 0; start 2228 fs/ceph/caps.c int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 2239 fs/ceph/caps.c ret = file_write_and_wait_range(file, start, end); start 93 fs/ceph/file.c size_t start; start 97 fs/ceph/file.c ITER_GET_BVECS_PAGES, &start); start 107 fs/ceph/file.c .bv_len = min_t(int, bytes, PAGE_SIZE - start), start 108 fs/ceph/file.c .bv_offset = start, start 113 fs/ceph/file.c start = 0; start 110 fs/ceph/locks.c req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); start 127 fs/ceph/locks.c fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start); start 128 fs/ceph/locks.c length = le64_to_cpu(req->r_reply_info.filelock_reply->start) + start 360 fs/ceph/locks.c cephlock->start = cpu_to_le64(lock->fl_start); start 108 fs/ceph/mdsmap.c const void *start = *p; start 365 fs/ceph/mdsmap.c start, end - start, true); start 1050 fs/ceph/super.h extern int ceph_fsync(struct file *file, loff_t start, loff_t end, start 671 fs/ceph/xattr.c start: start 700 fs/ceph/xattr.c goto start; start 76 fs/cifs/cifs_unicode.h wchar_t start; start 333 fs/cifs/cifs_unicode.h while (rp->start) { start 334 fs/cifs/cifs_unicode.h if (uc < rp->start) /* Before start of range */ start 337 fs/cifs/cifs_unicode.h return uc + rp->table[uc - rp->start]; start 375 fs/cifs/cifs_unicode.h while (rp->start) { start 376 fs/cifs/cifs_unicode.h if (uc < rp->start) /* Before start of range */ start 379 fs/cifs/cifs_unicode.h return uc + rp->table[uc - rp->start]; start 1198 fs/cifs/cifsfs.c static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 87 fs/cifs/cifsfs.h extern int cifs_fiemap(struct inode *, struct fiemap_extent_info *, u64 start, start 1143 fs/cifs/cifspdu.h __le64 start; start 25 fs/cifs/cifsroot.c static __be32 __init parse_srvaddr(char *start, char *end) start 31 fs/cifs/cifsroot.c while (start < end && i < sizeof(addr) - 1) { start 32 fs/cifs/cifsroot.c if (isdigit(*start) || *start == '.') start 33 fs/cifs/cifsroot.c addr[i++] = *start; start 34 fs/cifs/cifsroot.c start++; start 2638 fs/cifs/cifssmb.c parm_data->start = cpu_to_le64(start_offset); start 2689 fs/cifs/cifssmb.c pLockData->fl_start = le64_to_cpu(parm_data->start); start 2548 fs/cifs/file.c int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, start 2559 fs/cifs/file.c rc = file_write_and_wait_range(file, start, end); start 2589 fs/cifs/file.c int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 2598 fs/cifs/file.c rc = file_write_and_wait_range(file, start, end); start 2833 fs/cifs/file.c size_t start; start 2868 fs/cifs/file.c from, &pagevec, cur_len, &start); start 2886 fs/cifs/file.c (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE; start 2897 fs/cifs/file.c wdata->page_offset = start; start 2900 fs/cifs/file.c cur_len - (PAGE_SIZE - start) - start 3557 fs/cifs/file.c size_t start; start 3591 fs/cifs/file.c cur_len, 
&start); start 3617 fs/cifs/file.c npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE; start 3618 fs/cifs/file.c rdata->page_offset = start; start 3620 fs/cifs/file.c cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE : start 4568 fs/cifs/file.c start: start 4619 fs/cifs/file.c goto start; start 2135 fs/cifs/inode.c int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start, start 2163 fs/cifs/inode.c rc = server->ops->fiemap(tcon, cfile, fei, start, len); start 575 fs/cifs/misc.c start: start 593 fs/cifs/misc.c goto start; start 848 fs/cifs/misc.c size_t start; start 885 fs/cifs/misc.c rc = iov_iter_get_pages(iter, pages, count, max_pages, &start); start 899 fs/cifs/misc.c rc += start; start 911 fs/cifs/misc.c bv[npages + i].bv_offset = start; start 912 fs/cifs/misc.c bv[npages + i].bv_len = len - start; start 914 fs/cifs/misc.c start = 0; start 3230 fs/cifs/smb2ops.c struct fiemap_extent_info *fei, u64 start, u64 len) start 3243 fs/cifs/smb2ops.c in_data.file_offset = cpu_to_le64(start); start 3291 fs/cifs/smb2ops.c len = len - (next - start); start 3292 fs/cifs/smb2ops.c start = next; start 2078 fs/cifs/smbdirect.c int start, i, j; start 2127 fs/cifs/smbdirect.c start = i = 0; start 2132 fs/cifs/smbdirect.c if (i > start) { start 2138 fs/cifs/smbdirect.c start, i, i-start, start 2141 fs/cifs/smbdirect.c info, &iov[start], i-start, start 2150 fs/cifs/smbdirect.c start, iov[start].iov_base, start 2154 fs/cifs/smbdirect.c (char *)iov[start].iov_base + start 2178 fs/cifs/smbdirect.c start = i; start 2188 fs/cifs/smbdirect.c start, i, i-start, start 2190 fs/cifs/smbdirect.c rc = smbd_post_send_data(info, &iov[start], start 2191 fs/cifs/smbdirect.c i-start, remaining_data_length); start 15 fs/coda/coda_int.h int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); start 269 fs/coda/file.c int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync) start 280 fs/coda/file.c err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end); start 340 fs/coredump.c static int zap_process(struct task_struct *start, int exit_code, int flags) start 346 fs/coredump.c start->signal->flags = SIGNAL_GROUP_COREDUMP | flags; start 347 fs/coredump.c start->signal->group_exit_code = exit_code; start 348 fs/coredump.c start->signal->group_stop_count = 0; start 350 fs/coredump.c for_each_thread(start, t) { start 1718 fs/dax.c loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; start 1722 fs/dax.c err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); start 1464 fs/dcache.c struct dentry *start; start 1477 fs/dcache.c if (data->start == dentry) start 1506 fs/dcache.c if (data->start == dentry) start 1541 fs/dcache.c struct select_data data = {.start = parent}; start 575 fs/dlm/debug_fs.c .start = table_seq_start, start 582 fs/dlm/debug_fs.c .start = table_seq_start, start 589 fs/dlm/debug_fs.c .start = table_seq_start, start 596 fs/dlm/debug_fs.c .start = table_seq_start, start 90 fs/dlm/plock.c op->info.start = 0; start 126 fs/dlm/plock.c op->info.start = fl->fl_start; start 276 fs/dlm/plock.c op->info.start = fl->fl_start; start 337 fs/dlm/plock.c op->info.start = fl->fl_start; start 368 fs/dlm/plock.c fl->fl_start = op->info.start; start 53 fs/dlm/recoverd.c unsigned long start; start 95 fs/dlm/recoverd.c start = jiffies; start 238 fs/dlm/recoverd.c jiffies_to_msecs(jiffies - start)); start 324 fs/ecryptfs/file.c ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 180 fs/efs/inode.c efs_block_t start; 
start 188 fs/efs/inode.c start = ptr->cooked.ex_bn; start 193 fs/efs/inode.c return(sb->fs_start + start + block - offset); start 336 fs/ext2/balloc.c ext2_fsblk_t start = rsv->rsv_start; start 347 fs/ext2/balloc.c if (start < this->rsv_start) start 349 fs/ext2/balloc.c else if (start > this->rsv_end) start 579 fs/ext2/balloc.c bitmap_search_next_usable_block(ext2_grpblk_t start, struct buffer_head *bh, start 584 fs/ext2/balloc.c next = ext2_find_next_zero_bit(bh->b_data, maxblocks, start); start 603 fs/ext2/balloc.c find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) start 608 fs/ext2/balloc.c if (start > 0) { start 617 fs/ext2/balloc.c ext2_grpblk_t end_goal = (start + 63) & ~63; start 620 fs/ext2/balloc.c here = ext2_find_next_zero_bit(bh->b_data, end_goal, start); start 626 fs/ext2/balloc.c here = start; start 670 fs/ext2/balloc.c ext2_grpblk_t start, end; start 677 fs/ext2/balloc.c start = my_rsv->_rsv_start - group_first_block; start 680 fs/ext2/balloc.c start = 0; start 685 fs/ext2/balloc.c if ((start <= grp_goal) && (grp_goal < end)) start 686 fs/ext2/balloc.c start = grp_goal; start 691 fs/ext2/balloc.c start = grp_goal; start 693 fs/ext2/balloc.c start = 0; start 697 fs/ext2/balloc.c BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb)); start 701 fs/ext2/balloc.c grp_goal = find_next_usable_block(start, bitmap_bh, end); start 707 fs/ext2/balloc.c for (i = 0; i < 7 && grp_goal > start && start 714 fs/ext2/balloc.c start = grp_goal; start 722 fs/ext2/balloc.c start++; start 724 fs/ext2/balloc.c if (start >= end) start 346 fs/ext2/dir.c unsigned long start, n; start 359 fs/ext2/dir.c start = ei->i_dir_start_lookup; start 360 fs/ext2/dir.c if (start >= npages) start 361 fs/ext2/dir.c start = 0; start 362 fs/ext2/dir.c n = start; start 395 fs/ext2/dir.c } while (n != start); start 764 fs/ext2/ext2.h u64 start, u64 len); start 790 fs/ext2/ext2.h extern int ext2_fsync(struct file *file, loff_t start, loff_t end, start 150 fs/ext2/file.c int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 155 fs/ext2/file.c ret = generic_file_fsync(file, start, end, datasync); start 297 fs/ext2/inode.c __le32 *start = ind->bh ? 
(__le32 *) ind->bh->b_data : ei->i_data; start 303 fs/ext2/inode.c for (p = ind->p - 1; p >= start; p--) start 864 fs/ext2/inode.c u64 start, u64 len) start 866 fs/ext2/inode.c return generic_block_fiemap(inode, fieinfo, start, len, start 92 fs/ext4/balloc.c ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group); start 115 fs/ext4/balloc.c ext4_block_bitmap(sb, gdp) - start); start 126 fs/ext4/balloc.c ext4_inode_bitmap(sb, gdp) - start); start 138 fs/ext4/balloc.c c = EXT4_B2C(sbi, itbl_blk + i - start); start 186 fs/ext4/balloc.c ext4_fsblk_t start, tmp; start 207 fs/ext4/balloc.c start = ext4_group_first_block_no(sb, block_group); start 212 fs/ext4/balloc.c ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); start 216 fs/ext4/balloc.c ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); start 222 fs/ext4/balloc.c ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); start 2655 fs/ext4/ext4.h ext4_lblk_t start, ext4_lblk_t end); start 3189 fs/ext4/ext4.h int *has_inline, __u64 start, __u64 len); start 3283 fs/ext4/ext4.h extern int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, start 3311 fs/ext4/ext4.h __u64 start, __u64 len); start 3314 fs/ext4/ext4.h __u64 start, __u64 len); start 2183 fs/ext4/extents.c ext4_lblk_t next, next_del, start = 0, end = 0; start 2217 fs/ext4/extents.c start = block; start 2221 fs/ext4/extents.c start = block; start 2228 fs/ext4/extents.c start = block; start 2237 fs/ext4/extents.c start = block; start 2246 fs/ext4/extents.c BUG_ON(end <= start); start 2249 fs/ext4/extents.c es.es_lblk = start; start 2250 fs/ext4/extents.c es.es_len = end - start; start 2733 fs/ext4/extents.c ext4_lblk_t start, ext4_lblk_t end) start 2748 fs/ext4/extents.c ext_debug("truncate since %u in leaf to %u\n", start, end); start 2764 fs/ext4/extents.c trace_ext4_ext_rm_leaf(inode, start, ex, partial); start 2767 fs/ext4/extents.c ex_ee_block + ex_ee_len > start) { start 2778 fs/ext4/extents.c a = ex_ee_block > start ? 
ex_ee_block : start; start 2806 fs/ext4/extents.c start, end, ex_ee_block, start 2941 fs/ext4/extents.c int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, start 2955 fs/ext4/extents.c ext_debug("truncate since %u to %u\n", start, end); start 2963 fs/ext4/extents.c trace_ext4_ext_remove_space(inode, start, end, depth); start 3084 fs/ext4/extents.c &partial, start, end); start 3156 fs/ext4/extents.c trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, start 4706 fs/ext4/extents.c loff_t start, end; start 4728 fs/ext4/extents.c start = round_up(offset, 1 << blkbits); start 4731 fs/ext4/extents.c if (start < offset || end > offset + len) start 4736 fs/ext4/extents.c lblk = start >> blkbits; start 4804 fs/ext4/extents.c truncate_pagecache_range(inode, start, end - 1); start 5111 fs/ext4/extents.c __u64 start, __u64 len, start 5125 fs/ext4/extents.c start, len); start 5141 fs/ext4/extents.c return generic_block_fiemap(inode, fieinfo, start, len, start 5155 fs/ext4/extents.c start_blk = start >> inode->i_sb->s_blocksize_bits; start 5156 fs/ext4/extents.c last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; start 5171 fs/ext4/extents.c __u64 start, __u64 len) start 5173 fs/ext4/extents.c return _ext4_fiemap(inode, fieinfo, start, len, start 5178 fs/ext4/extents.c __u64 start, __u64 len) start 5190 fs/ext4/extents.c return _ext4_fiemap(inode, fieinfo, start, len, start 5320 fs/ext4/extents.c ext4_lblk_t start, ext4_lblk_t shift, start 5347 fs/ext4/extents.c path = ext4_find_extent(inode, start - 1, &path, start 5362 fs/ext4/extents.c if ((start == ex_start && shift > ex_start) || start 5363 fs/ext4/extents.c (shift > start - ex_end)) { start 5381 fs/ext4/extents.c iterator = &start; start 5390 fs/ext4/extents.c while (iterator && start <= stop) { start 5425 fs/ext4/extents.c while (le32_to_cpu(extent->ee_block) < start) start 340 fs/ext4/extents_status.c ext4_lblk_t start, ext4_lblk_t end) start 344 fs/ext4/extents_status.c __es_find_extent_range(inode, matching_fn, start, end, &es); start 347 fs/ext4/extents_status.c else if (es.es_lblk <= start && start 348 fs/ext4/extents_status.c start < es.es_lblk + es.es_len) start 350 fs/ext4/extents_status.c else if (start <= es.es_lblk && es.es_lblk <= end) start 1721 fs/ext4/extents_status.c ext4_lblk_t start = ei->i_es_shrink_lblk; start 1733 fs/ext4/extents_status.c start != 0) start 1734 fs/ext4/extents_status.c es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk); start 2020 fs/ext4/extents_status.c static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start, start 2034 fs/ext4/extents_status.c es = __es_tree_search(&tree->root, start); start 2038 fs/ext4/extents_status.c if (es->es_lblk <= start) start 2039 fs/ext4/extents_status.c first_lclu = EXT4_B2C(sbi, start); start 187 fs/ext4/fsmap.c ext4_group_t agno, ext4_grpblk_t start, start 199 fs/ext4/fsmap.c fsb = (EXT4_C2B(sbi, start) + ext4_group_first_block_no(sb, agno)); start 95 fs/ext4/fsync.c int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) start 120 fs/ext4/fsync.c ret = __generic_file_fsync(file, start, end, datasync); start 128 fs/ext4/fsync.c ret = file_write_and_wait_range(file, start, end); start 213 fs/ext4/indirect.c __le32 *start = ind->bh ? 
(__le32 *) ind->bh->b_data : ei->i_data; start 217 fs/ext4/indirect.c for (p = ind->p - 1; p >= start; p--) { start 1211 fs/ext4/indirect.c ext4_lblk_t start, ext4_lblk_t end) start 1229 fs/ext4/indirect.c if ((start >= end) || (start > max_block)) start 1232 fs/ext4/indirect.c n = ext4_block_to_path(inode, start, offsets, NULL); start 1860 fs/ext4/inline.c int *has_inline, __u64 start, __u64 len) start 1876 fs/ext4/inline.c if (start >= inline_len) start 1878 fs/ext4/inline.c if (start + len < inline_len) start 1879 fs/ext4/inline.c inline_len = start + len; start 1880 fs/ext4/inline.c inline_len -= start; start 1894 fs/ext4/inline.c error = fiemap_fill_next_extent(fieinfo, start, physical, start 1502 fs/ext4/inode.c unsigned start, size; start 1504 fs/ext4/inode.c start = max(from, block_start); start 1505 fs/ext4/inode.c size = min(to, block_end) - start; start 1507 fs/ext4/inode.c zero_user(page, start, size); start 1705 fs/ext4/inode.c ext4_lblk_t start, last; start 1706 fs/ext4/inode.c start = index << (PAGE_SHIFT - inode->i_blkbits); start 1708 fs/ext4/inode.c ext4_es_remove_extent(inode, start, last - start + 1); start 2373 fs/ext4/inode.c pgoff_t start, end; start 2378 fs/ext4/inode.c start = mpd->map.m_lblk >> bpp_bits; start 2380 fs/ext4/inode.c lblk = start << bpp_bits; start 2384 fs/ext4/inode.c while (start <= end) { start 2386 fs/ext4/inode.c &start, end); start 3163 fs/ext4/inode.c unsigned long start, end; start 3171 fs/ext4/inode.c start = pos & (PAGE_SIZE - 1); start 3172 fs/ext4/inode.c end = start + copied - 1; start 4149 fs/ext4/inode.c ext4_fsblk_t start, end; start 4156 fs/ext4/inode.c start = lstart >> sb->s_blocksize_bits; start 4160 fs/ext4/inode.c if (start == end && start 4625 fs/ext4/inode.c int i, start; start 4627 fs/ext4/inode.c start = inode_offset & ~(inodes_per_block - 1); start 4643 fs/ext4/inode.c for (i = start; i < start + inodes_per_block; i++) { start 4650 fs/ext4/inode.c if (i == start + inodes_per_block) { start 750 fs/ext4/ioctl.c u64 start, u64 len, u64 *new_len) start 759 fs/ext4/ioctl.c if (start > maxbytes) start 765 fs/ext4/ioctl.c if (len > maxbytes || (maxbytes - len) < start) start 766 fs/ext4/ioctl.c *new_len = maxbytes - start; start 402 fs/ext4/mballoc.c static inline int mb_find_next_zero_bit(void *addr, int max, int start) start 407 fs/ext4/mballoc.c start += fix; start 409 fs/ext4/mballoc.c ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; start 415 fs/ext4/mballoc.c static inline int mb_find_next_bit(void *addr, int max, int start) start 420 fs/ext4/mballoc.c start += fix; start 422 fs/ext4/mballoc.c ret = ext4_find_next_bit(addr, tmpmax, start) - fix; start 1562 fs/ext4/mballoc.c int start = ex->fe_start; start 1568 fs/ext4/mballoc.c BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); start 1572 fs/ext4/mballoc.c mb_mark_used_double(e4b, start, len); start 1575 fs/ext4/mballoc.c if (e4b->bd_info->bb_first_free == start) start 1579 fs/ext4/mballoc.c if (start != 0) start 1580 fs/ext4/mballoc.c mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); start 1581 fs/ext4/mballoc.c if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) start 1582 fs/ext4/mballoc.c max = !mb_test_bit(start + len, e4b->bd_bitmap); start 1590 fs/ext4/mballoc.c ord = mb_find_order_for_block(e4b, start); start 1592 fs/ext4/mballoc.c if (((start >> ord) << ord) == start && len >= (1 << ord)) { start 1596 fs/ext4/mballoc.c BUG_ON((start >> ord) >= max); start 1597 fs/ext4/mballoc.c mb_set_bit(start >> ord, buddy); start 1599 fs/ext4/mballoc.c start += mlen; 
start 1612 fs/ext4/mballoc.c mb_set_bit(start >> ord, buddy); start 1616 fs/ext4/mballoc.c cur = (start >> ord) & ~1U; start 1847 fs/ext4/mballoc.c ext4_fsblk_t start; start 1849 fs/ext4/mballoc.c start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + start 1852 fs/ext4/mballoc.c if (do_div(start, sbi->s_stripe) == 0) { start 2338 fs/ext4/mballoc.c .start = ext4_mb_seq_groups_start, start 3090 fs/ext4/mballoc.c ext4_lblk_t start; start 3165 fs/ext4/mballoc.c start = start_off >> bsbits; start 3168 fs/ext4/mballoc.c if (ar->pleft && start <= ar->lleft) { start 3169 fs/ext4/mballoc.c size -= ar->lleft + 1 - start; start 3170 fs/ext4/mballoc.c start = ar->lleft + 1; start 3172 fs/ext4/mballoc.c if (ar->pright && start + size - 1 >= ar->lright) start 3173 fs/ext4/mballoc.c size -= start + size - ar->lright; start 3182 fs/ext4/mballoc.c end = start + size; start 3205 fs/ext4/mballoc.c if (pa->pa_lstart >= end || pa_end <= start) { start 3209 fs/ext4/mballoc.c BUG_ON(pa->pa_lstart <= start && pa_end >= end); start 3213 fs/ext4/mballoc.c BUG_ON(pa_end < start); start 3214 fs/ext4/mballoc.c start = pa_end; start 3222 fs/ext4/mballoc.c size = end - start; start 3233 fs/ext4/mballoc.c BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); start 3239 fs/ext4/mballoc.c if (start + size <= ac->ac_o_ex.fe_logical && start 3240 fs/ext4/mballoc.c start > ac->ac_o_ex.fe_logical) { start 3243 fs/ext4/mballoc.c (unsigned long) start, (unsigned long) size, start 3253 fs/ext4/mballoc.c ac->ac_g_ex.fe_logical = start; start 3257 fs/ext4/mballoc.c if (ar->pright && (ar->lright == (start + size))) { start 3264 fs/ext4/mballoc.c if (ar->pleft && (ar->lleft + 1 == start)) { start 3273 fs/ext4/mballoc.c (unsigned) orig_size, (unsigned) start); start 3342 fs/ext4/mballoc.c ext4_fsblk_t start; start 3347 fs/ext4/mballoc.c start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); start 3349 fs/ext4/mballoc.c start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); start 3350 fs/ext4/mballoc.c len = EXT4_NUM_B2C(sbi, end - start); start 3351 fs/ext4/mballoc.c ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, start 3357 fs/ext4/mballoc.c BUG_ON(start < pa->pa_pstart); start 3362 fs/ext4/mballoc.c mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); start 3544 fs/ext4/mballoc.c ext4_grpblk_t start; start 3560 fs/ext4/mballoc.c &groupnr, &start); start 3566 fs/ext4/mballoc.c ext4_set_bits(bitmap, start, len); start 4150 fs/ext4/mballoc.c ext4_grpblk_t start; start 4158 fs/ext4/mballoc.c NULL, &start); start 4161 fs/ext4/mballoc.c start, pa->pa_len); start 5130 fs/ext4/mballoc.c static int ext4_trim_extent(struct super_block *sb, int start, int count, start 5138 fs/ext4/mballoc.c trace_ext4_trim_extent(sb, group, start, count); start 5142 fs/ext4/mballoc.c ex.fe_start = start; start 5152 fs/ext4/mballoc.c ret = ext4_issue_discard(sb, group, start, count, NULL); start 5154 fs/ext4/mballoc.c mb_free_blocks(NULL, e4b, start, ex.fe_len); start 5178 fs/ext4/mballoc.c ext4_grpblk_t start, ext4_grpblk_t max, start 5186 fs/ext4/mballoc.c trace_ext4_trim_all_free(sb, group, start, max); start 5201 fs/ext4/mballoc.c start = (e4b.bd_info->bb_first_free > start) ? 
start 5202 fs/ext4/mballoc.c e4b.bd_info->bb_first_free : start; start 5204 fs/ext4/mballoc.c while (start <= max) { start 5205 fs/ext4/mballoc.c start = mb_find_next_zero_bit(bitmap, max + 1, start); start 5206 fs/ext4/mballoc.c if (start > max) start 5208 fs/ext4/mballoc.c next = mb_find_next_bit(bitmap, max + 1, start); start 5210 fs/ext4/mballoc.c if ((next - start) >= minblocks) { start 5211 fs/ext4/mballoc.c ret = ext4_trim_extent(sb, start, start 5212 fs/ext4/mballoc.c next - start, group, &e4b); start 5216 fs/ext4/mballoc.c count += next - start; start 5218 fs/ext4/mballoc.c free_count += next - start; start 5219 fs/ext4/mballoc.c start = next + 1; start 5267 fs/ext4/mballoc.c uint64_t start, end, minlen, trimmed = 0; start 5273 fs/ext4/mballoc.c start = range->start >> sb->s_blocksize_bits; start 5274 fs/ext4/mballoc.c end = start + (range->len >> sb->s_blocksize_bits) - 1; start 5279 fs/ext4/mballoc.c start >= max_blks || start 5286 fs/ext4/mballoc.c if (start < first_data_blk) start 5287 fs/ext4/mballoc.c start = first_data_blk; start 5290 fs/ext4/mballoc.c ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, start 5346 fs/ext4/mballoc.c ext4_grpblk_t start, start 5363 fs/ext4/mballoc.c start = (e4b.bd_info->bb_first_free > start) ? start 5364 fs/ext4/mballoc.c e4b.bd_info->bb_first_free : start; start 5368 fs/ext4/mballoc.c while (start <= end) { start 5369 fs/ext4/mballoc.c start = mb_find_next_zero_bit(bitmap, end + 1, start); start 5370 fs/ext4/mballoc.c if (start > end) start 5372 fs/ext4/mballoc.c next = mb_find_next_bit(bitmap, end + 1, start); start 5375 fs/ext4/mballoc.c error = formatter(sb, group, start, next - start, priv); start 5380 fs/ext4/mballoc.c start = next + 1; start 205 fs/ext4/mballoc.h ext4_grpblk_t start, start 213 fs/ext4/mballoc.h ext4_grpblk_t start, start 620 fs/ext4/migrate.c ext4_lblk_t start, end; start 661 fs/ext4/migrate.c blk = len = start = end = 0; start 665 fs/ext4/migrate.c start = le32_to_cpu(ex->ee_block); start 666 fs/ext4/migrate.c end = start + len - 1; start 675 fs/ext4/migrate.c for (i = start; i <= end; i++) start 1447 fs/ext4/namei.c ext4_lblk_t start, block; start 1479 fs/ext4/namei.c block = start = 0; start 1501 fs/ext4/namei.c start = EXT4_I(dir)->i_dir_start_lookup; start 1502 fs/ext4/namei.c if (start >= nblocks) start 1503 fs/ext4/namei.c start = 0; start 1504 fs/ext4/namei.c block = start; start 1514 fs/ext4/namei.c if (block < start) start 1515 fs/ext4/namei.c ra_max = start - block; start 1562 fs/ext4/namei.c } while (block != start); start 1571 fs/ext4/namei.c start = 0; start 120 fs/ext4/resize.c ext4_fsblk_t start = ext4_blocks_count(es); start 121 fs/ext4/resize.c ext4_fsblk_t end = start + input->blocks_count; start 137 fs/ext4/resize.c metaend = start + overhead; start 148 fs/ext4/resize.c ext4_get_group_no_and_offset(sb, start, NULL, &offset); start 162 fs/ext4/resize.c } else if (outside(input->block_bitmap, start, end)) start 165 fs/ext4/resize.c else if (outside(input->inode_bitmap, start, end)) start 168 fs/ext4/resize.c else if (outside(input->inode_table, start, end) || start 169 fs/ext4/resize.c outside(itend - 1, start, end)) start 185 fs/ext4/resize.c else if (inside(input->block_bitmap, start, metaend)) start 188 fs/ext4/resize.c start, metaend - 1); start 189 fs/ext4/resize.c else if (inside(input->inode_bitmap, start, metaend)) start 192 fs/ext4/resize.c start, metaend - 1); start 193 fs/ext4/resize.c else if (inside(input->inode_table, start, metaend) || start 194 fs/ext4/resize.c inside(itend - 1, 
start, metaend)) start 198 fs/ext4/resize.c itend - 1, start, metaend - 1); start 463 fs/ext4/resize.c ext4_fsblk_t start; start 469 fs/ext4/resize.c start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group)); start 472 fs/ext4/resize.c count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start); start 496 fs/ext4/resize.c first_cluster, first_cluster - start, count2); start 497 fs/ext4/resize.c ext4_set_bits(bh->b_data, first_cluster - start, count2); start 526 fs/ext4/resize.c ext4_fsblk_t start; start 555 fs/ext4/resize.c start = ext4_group_first_block_no(sb, group); start 568 fs/ext4/resize.c block = start + ext4_bg_has_super(sb, group); start 606 fs/ext4/resize.c err = sb_issue_zeroout(sb, gdblocks + start + 1, start 644 fs/ext4/resize.c start); start 682 fs/ext4/resize.c start = (&group_data[0].block_bitmap)[j]; start 683 fs/ext4/resize.c block = start; start 692 fs/ext4/resize.c EXT4_B2C(sbi, start), start 694 fs/ext4/resize.c start + count start 699 fs/ext4/resize.c start = (&group_data[i].block_bitmap)[j]; start 700 fs/ext4/resize.c block = start; start 706 fs/ext4/resize.c EXT4_B2C(sbi, start), start 708 fs/ext4/resize.c start + count start 3310 fs/ext4/super.c ext4_group_t start) start 3321 fs/ext4/super.c elr->lr_next_group = start; start 4834 fs/ext4/super.c ext4_fsblk_t start; start 4891 fs/ext4/super.c start = sb_block + 1; start 4895 fs/ext4/super.c start, len, blocksize); start 211 fs/f2fs/checkpoint.c int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, start 215 fs/f2fs/checkpoint.c block_t blkno = start; start 274 fs/f2fs/checkpoint.c return blkno - start; start 313 fs/f2fs/data.c unsigned int start; start 324 fs/f2fs/data.c start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; start 325 fs/f2fs/data.c start %= F2FS_IO_SIZE(sbi); start 327 fs/f2fs/data.c if (start == 0) start 331 fs/f2fs/data.c for (; start < F2FS_IO_SIZE(sbi); start++) { start 1537 fs/f2fs/data.c u64 start, u64 len) start 1564 fs/f2fs/data.c ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len); start 1572 fs/f2fs/data.c start_blk = logical_to_blk(inode, start); start 1573 fs/f2fs/data.c last_blk = logical_to_blk(inode, start + len - 1); start 629 fs/f2fs/dir.c start: start 664 fs/f2fs/dir.c goto start; start 283 fs/f2fs/f2fs.h block_t start; /* actual start address in dev */ start 292 fs/f2fs/f2fs.h block_t start; /* actual start address in dev */ start 455 fs/f2fs/f2fs.h u64 start; start 460 fs/f2fs/f2fs.h u64 start; start 2925 fs/f2fs/f2fs.h int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); start 3061 fs/f2fs/f2fs.h struct page *f2fs_get_node_page_ra(struct page *parent, int start); start 3113 fs/f2fs/f2fs.h unsigned int start, unsigned int end); start 3166 fs/f2fs/f2fs.h int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, start 3230 fs/f2fs/f2fs.h u64 start, u64 len); start 3516 fs/f2fs/f2fs.h __u64 start, __u64 len); start 207 fs/f2fs/file.c static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, start 234 fs/f2fs/file.c ret = file_write_and_wait_range(file, start, end); start 336 fs/f2fs/file.c int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) start 340 fs/f2fs/file.c return f2fs_do_sync_file(file, start, end, datasync, false); start 888 fs/f2fs/file.c loff_t start, loff_t len) start 906 fs/f2fs/file.c zero_user(page, start, len); start 1227 fs/f2fs/file.c pgoff_t start = offset >> PAGE_SHIFT; start 1240 fs/f2fs/file.c ret = __exchange_data_block(inode, inode, end, 
start, nrpages - end, true); start 1288 fs/f2fs/file.c static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, start 1292 fs/f2fs/file.c pgoff_t index = start; start 1309 fs/f2fs/file.c for (index = start; index < end; index++, dn->ofs_in_node++) { start 1327 fs/f2fs/file.c f2fs_update_extent_cache_range(dn, start, 0, index - start); start 2319 fs/f2fs/file.c end = range.start + range.len; start 2320 fs/f2fs/file.c if (end < range.start || range.start < MAIN_BLKADDR(sbi) || start 2338 fs/f2fs/file.c ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start)); start 2339 fs/f2fs/file.c range.start += BLKS_PER_SEC(sbi); start 2340 fs/f2fs/file.c if (range.start <= end) start 2394 fs/f2fs/file.c pg_start = range->start >> PAGE_SHIFT; start 2395 fs/f2fs/file.c pg_end = (range->start + range->len) >> PAGE_SHIFT; start 2402 fs/f2fs/file.c err = filemap_write_and_wait_range(inode->i_mapping, range->start, start 2403 fs/f2fs/file.c range->start + range->len - 1); start 2543 fs/f2fs/file.c if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) start 2546 fs/f2fs/file.c if (unlikely((range.start + range.len) >> PAGE_SHIFT > start 246 fs/f2fs/gc.c unsigned int start = GET_SEG_FROM_SEC(sbi, secno); start 254 fs/f2fs/gc.c mtime += get_seg_entry(sbi, start + i)->mtime; start 1386 fs/f2fs/gc.c static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, start 1395 fs/f2fs/gc.c allocate_segment_for_resize(sbi, type, start, end); start 1398 fs/f2fs/gc.c for (segno = start; segno <= end; segno += sbi->segs_per_sec) { start 1417 fs/f2fs/gc.c next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); start 700 fs/f2fs/inline.c struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) start 725 fs/f2fs/inline.c if (start >= ilen) start 727 fs/f2fs/inline.c if (start + len < ilen) start 728 fs/f2fs/inline.c ilen = start + len; start 729 fs/f2fs/inline.c ilen -= start; start 738 fs/f2fs/inline.c err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags); start 106 fs/f2fs/inode.c __le32 *start = inline_data; start 107 fs/f2fs/inode.c __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32); start 109 fs/f2fs/inode.c while (start < end) { start 110 fs/f2fs/inode.c if (*start++) { start 206 fs/f2fs/namei.c int start, count; start 220 fs/f2fs/namei.c start = cold_count; start 223 fs/f2fs/namei.c start = 0; start 227 fs/f2fs/namei.c for (i = start; i < count; i++) { start 202 fs/f2fs/node.c nid_t start, unsigned int nr, struct nat_entry **ep) start 204 fs/f2fs/node.c return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); start 280 fs/f2fs/node.c nid_t start, unsigned int nr, struct nat_entry_set **ep) start 283 fs/f2fs/node.c start, nr); start 585 fs/f2fs/node.c static void f2fs_ra_node_pages(struct page *parent, int start, int n) start 595 fs/f2fs/node.c end = start + n; start 597 fs/f2fs/node.c for (i = start; i < end; i++) { start 1336 fs/f2fs/node.c struct page *parent, int start) start 1360 fs/f2fs/node.c f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE); start 1398 fs/f2fs/node.c struct page *f2fs_get_node_page_ra(struct page *parent, int start) start 1401 fs/f2fs/node.c nid_t nid = get_nid(parent, start, false); start 1403 fs/f2fs/node.c return __get_node_page(sbi, nid, parent, start); start 193 fs/f2fs/node.h static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start) start 204 fs/f2fs/node.h block_off = NAT_BLOCK_OFFSET(start); start 512 fs/f2fs/recovery.c unsigned int start, end; start 530 
fs/f2fs/recovery.c start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); start 531 fs/f2fs/recovery.c end = start + ADDRS_PER_PAGE(page, inode); start 535 fs/f2fs/recovery.c err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE); start 560 fs/f2fs/recovery.c for (; start < end; start++, dn.ofs_in_node++) { start 589 fs/f2fs/recovery.c (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT))) start 591 fs/f2fs/recovery.c (loff_t)(start + 1) << PAGE_SHIFT); start 936 fs/f2fs/segment.c block_t start, block_t len) start 950 fs/f2fs/segment.c dc->start = start; start 968 fs/f2fs/segment.c block_t start, block_t len, start 975 fs/f2fs/segment.c dc = __create_discard_cmd(sbi, bdev, lstart, start, len); start 1004 fs/f2fs/segment.c trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); start 1021 fs/f2fs/segment.c KERN_INFO, dc->lstart, dc->start, dc->len, dc->error); start 1043 fs/f2fs/segment.c block_t start, block_t end) start 1048 fs/f2fs/segment.c block_t blk = start; start 1111 fs/f2fs/segment.c block_t start, block_t len); start 1126 fs/f2fs/segment.c block_t lstart, start, len, total_len; start 1135 fs/f2fs/segment.c trace_f2fs_issue_discard(bdev, dc->start, dc->len); start 1138 fs/f2fs/segment.c start = dc->start; start 1166 fs/f2fs/segment.c SECTOR_FROM_BLOCK(start), start 1210 fs/f2fs/segment.c start += len; start 1216 fs/f2fs/segment.c __update_discard_tree_range(sbi, bdev, lstart, start, len); start 1222 fs/f2fs/segment.c block_t start, block_t len, start 1241 fs/f2fs/segment.c dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, start 1279 fs/f2fs/segment.c di.start + blkaddr + 1 - di.lstart, start 1285 fs/f2fs/segment.c dc->start++; start 1294 fs/f2fs/segment.c block_t start, block_t len) start 1318 fs/f2fs/segment.c di.start = start; start 1337 fs/f2fs/segment.c di.start = start + di.lstart - lstart; start 1361 fs/f2fs/segment.c next_dc->di.start = di.start; start 1370 fs/f2fs/segment.c __insert_discard_tree(sbi, bdev, di.lstart, di.start, start 1578 fs/f2fs/segment.c block_t start, block_t end) start 1592 fs/f2fs/segment.c if (dc->lstart + dc->len <= start || end <= dc->lstart) start 1804 fs/f2fs/segment.c sector_t start = blkstart, len = 0; start 1814 fs/f2fs/segment.c if (i != start) { start 1820 fs/f2fs/segment.c start, len); start 1824 fs/f2fs/segment.c start = i; start 1837 fs/f2fs/segment.c err = __issue_discard_async(sbi, bdev, start, len); start 1851 fs/f2fs/segment.c unsigned int start = 0, end = -1; start 1874 fs/f2fs/segment.c start = __find_rev_next_bit(dmap, max_blocks, end + 1); start 1875 fs/f2fs/segment.c if (start >= max_blocks) start 1878 fs/f2fs/segment.c end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); start 1879 fs/f2fs/segment.c if (force && start && end != max_blocks start 1880 fs/f2fs/segment.c && (end - start) < cpc->trim_minlen) start 1893 fs/f2fs/segment.c for (i = start; i < end; i++) start 1896 fs/f2fs/segment.c SM_I(sbi)->dcc_info->nr_discards += end - start; start 1939 fs/f2fs/segment.c unsigned int start = 0, end = -1; start 1951 fs/f2fs/segment.c start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); start 1952 fs/f2fs/segment.c if (start >= MAIN_SEGS(sbi)) start 1955 fs/f2fs/segment.c start + 1); start 1958 fs/f2fs/segment.c start = rounddown(start, sbi->segs_per_sec); start 1962 fs/f2fs/segment.c for (i = start; i < end; i++) { start 1970 fs/f2fs/segment.c if (force && start >= cpc->trim_start && start 1975 fs/f2fs/segment.c f2fs_issue_discard(sbi, START_BLOCK(sbi, start), start 1976 fs/f2fs/segment.c (end - start) 
<< sbi->log_blocks_per_seg); start 1980 fs/f2fs/segment.c secno = GET_SEC_FROM_SEG(sbi, start); start 1983 fs/f2fs/segment.c !get_valid_blocks(sbi, start, true)) start 1987 fs/f2fs/segment.c start = start_segno + sbi->segs_per_sec; start 1988 fs/f2fs/segment.c if (start < end) start 1991 fs/f2fs/segment.c end = start - 1; start 2528 fs/f2fs/segment.c struct curseg_info *seg, block_t start) start 2540 fs/f2fs/segment.c pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); start 2670 fs/f2fs/segment.c unsigned int start, unsigned int end) start 2680 fs/f2fs/segment.c if (segno < start || segno > end) start 2745 fs/f2fs/segment.c unsigned int start, unsigned int end) start 2764 fs/f2fs/segment.c NULL, start, start 2788 fs/f2fs/segment.c start = dc->lstart + dc->len; start 2817 fs/f2fs/segment.c __u64 start = F2FS_BYTES_TO_BLK(range->start); start 2818 fs/f2fs/segment.c __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; start 2827 fs/f2fs/segment.c if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) start 2839 fs/f2fs/segment.c start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); start 3434 fs/f2fs/segment.c block_t start; start 3437 fs/f2fs/segment.c start = start_sum_block(sbi); start 3439 fs/f2fs/segment.c page = f2fs_get_meta_page(sbi, start++); start 3481 fs/f2fs/segment.c page = f2fs_get_meta_page(sbi, start++); start 3720 fs/f2fs/segment.c unsigned int start) start 3726 fs/f2fs/segment.c src_off = current_sit_addr(sbi, start); start 3730 fs/f2fs/segment.c seg_info_to_sit_page(sbi, page, start); start 3733 fs/f2fs/segment.c set_to_next_sit(sit_i, start); start 3949 fs/f2fs/segment.c unsigned int sit_segs, start; start 3984 fs/f2fs/segment.c for (start = 0; start < MAIN_SEGS(sbi); start++) { start 3985 fs/f2fs/segment.c sit_i->sentries[start].cur_valid_map = bitmap; start 3988 fs/f2fs/segment.c sit_i->sentries[start].ckpt_valid_map = bitmap; start 3992 fs/f2fs/segment.c sit_i->sentries[start].cur_valid_map_mir = bitmap; start 3996 fs/f2fs/segment.c sit_i->sentries[start].discard_map = bitmap; start 4121 fs/f2fs/segment.c unsigned int i, start, end; start 4130 fs/f2fs/segment.c start = start_blk * sit_i->sents_per_block; start 4133 fs/f2fs/segment.c for (; start < end && start < MAIN_SEGS(sbi); start++) { start 4137 fs/f2fs/segment.c se = &sit_i->sentries[start]; start 4138 fs/f2fs/segment.c page = get_current_sit_page(sbi, start); start 4142 fs/f2fs/segment.c sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; start 4145 fs/f2fs/segment.c err = check_block_count(sbi, start, &sit); start 4166 fs/f2fs/segment.c get_sec_entry(sbi, start)->valid_blocks += start 4176 fs/f2fs/segment.c start = le32_to_cpu(segno_in_journal(journal, i)); start 4177 fs/f2fs/segment.c if (start >= MAIN_SEGS(sbi)) { start 4179 fs/f2fs/segment.c start); start 4184 fs/f2fs/segment.c se = &sit_i->sentries[start]; start 4191 fs/f2fs/segment.c err = check_block_count(sbi, start, &sit); start 4208 fs/f2fs/segment.c get_sec_entry(sbi, start)->valid_blocks += start 4210 fs/f2fs/segment.c get_sec_entry(sbi, start)->valid_blocks -= start 4227 fs/f2fs/segment.c unsigned int start; start 4230 fs/f2fs/segment.c for (start = 0; start < MAIN_SEGS(sbi); start++) { start 4231 fs/f2fs/segment.c struct seg_entry *sentry = get_seg_entry(sbi, start); start 4233 fs/f2fs/segment.c __set_free(sbi, start); start 377 fs/f2fs/segment.h struct page *page, unsigned int start) start 382 fs/f2fs/segment.h unsigned int end = min(start + SIT_ENTRY_PER_BLOCK, start 388 fs/f2fs/segment.h for (i = 
0; i < end - start; i++) { start 390 fs/f2fs/segment.h se = get_seg_entry(sbi, start + i); start 718 fs/f2fs/segment.h unsigned int start) start 721 fs/f2fs/segment.h unsigned int offset = SIT_BLOCK_OFFSET(start); start 724 fs/f2fs/segment.h check_seg_range(sbi, start); start 752 fs/f2fs/segment.h static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start) start 754 fs/f2fs/segment.h unsigned int block_off = SIT_BLOCK_OFFSET(start); start 287 fs/fat/fat.h int cluster = le16_to_cpu(de->start); start 295 fs/fat/fat.h de->start = cpu_to_le16(cluster); start 404 fs/fat/fat.h extern int fat_file_fsync(struct file *file, loff_t start, loff_t end, start 721 fs/fat/fatent.c ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT); start 194 fs/fat/file.c int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) start 199 fs/fat/file.c err = __generic_file_fsync(filp, start, end, datasync); start 230 fs/fat/file.c loff_t start = inode->i_size, count = size - inode->i_size; start 246 fs/fat/file.c err = filemap_fdatawrite_range(mapping, start, start 247 fs/fat/file.c start + count - 1); start 255 fs/fat/file.c err = filemap_fdatawait_range(mapping, start, start 256 fs/fat/file.c start + count - 1); start 630 fs/fat/namei_vfat.c ps->start = 0; start 463 fs/file.c static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) start 467 fs/file.c unsigned int bitbit = start / BITS_PER_LONG; start 472 fs/file.c if (bitbit > start) start 473 fs/file.c start = bitbit; start 474 fs/file.c return find_next_zero_bit(fdt->open_fds, maxfd, start); start 481 fs/file.c unsigned start, unsigned end, unsigned flags) start 490 fs/file.c fd = start; start 516 fs/file.c if (start <= files->next_fd) start 538 fs/file.c static int alloc_fd(unsigned start, unsigned flags) start 540 fs/file.c return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags); start 83 fs/fscache/histogram.c .start = fscache_histogram_start, start 302 fs/fscache/object-list.c .start = fscache_objlist_start, start 280 fs/fscache/object.c unsigned long start; start 284 fs/fscache/object.c start = jiffies; start 286 fs/fscache/object.c fscache_hist(fscache_objs_histogram, start); start 619 fs/fscache/operation.c unsigned long start; start 627 fs/fscache/operation.c start = jiffies; start 629 fs/fscache/operation.c fscache_hist(fscache_ops_histogram, start); start 1294 fs/fuse/dir.c static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, start 1308 fs/fuse/dir.c err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR); start 481 fs/fuse/file.c int fuse_fsync_common(struct file *file, loff_t start, loff_t end, start 501 fs/fuse/file.c static int fuse_fsync(struct file *file, loff_t start, loff_t end, start 518 fs/fuse/file.c err = file_write_and_wait_range(file, start, end); start 540 fs/fuse/file.c err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC); start 1389 fs/fuse/file.c size_t start; start 1393 fs/fuse/file.c &start); start 1400 fs/fuse/file.c ret += start; start 1403 fs/fuse/file.c ap->descs[ap->num_pages].offset = start; start 2331 fs/fuse/file.c if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || start 2332 fs/fuse/file.c ffl->end < ffl->start) start 2335 fs/fuse/file.c fl->fl_start = ffl->start; start 2365 fs/fuse/file.c inarg->lk.start = fl->fl_start; start 3158 fs/fuse/file.c static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end) start 3160 fs/fuse/file.c int err = 
filemap_write_and_wait_range(inode->i_mapping, start, end); start 864 fs/fuse/fuse_i.h int fuse_fsync_common(struct file *file, loff_t start, loff_t end, start 47 fs/gfs2/aops.c unsigned int start, end; start 49 fs/gfs2/aops.c for (bh = head, start = 0; bh != head || !start; start 50 fs/gfs2/aops.c bh = bh->b_this_page, start = end) { start 51 fs/gfs2/aops.c end = start + bsize; start 54 fs/gfs2/aops.c if (start >= to) start 299 fs/gfs2/bmap.c static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end) start 303 fs/gfs2/bmap.c for (t = start; t < end; t++) { start 491 fs/gfs2/bmap.c u16 start = mp->mp_list[hgt]; start 497 fs/gfs2/bmap.c ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start; start 507 fs/gfs2/bmap.c ptrs = mp->mp_list[hgt] - start; start 555 fs/gfs2/bmap.c const __be64 *start, *ptr, *end; start 559 fs/gfs2/bmap.c start = metapointer(hgt, mp); start 560 fs/gfs2/bmap.c end = start + ptrs; start 562 fs/gfs2/bmap.c for (ptr = start; ptr < end; ptr++) { start 564 fs/gfs2/bmap.c mp->mp_list[hgt] += ptr - start; start 1495 fs/gfs2/bmap.c struct buffer_head *bh, __be64 *start, __be64 *end, start 1519 fs/gfs2/bmap.c for (p = start; p < end; p++) { start 1564 fs/gfs2/bmap.c revokes += end - start; start 1702 fs/gfs2/bmap.c __be64 **start, __be64 **end) start 1708 fs/gfs2/bmap.c *start = first; start 1711 fs/gfs2/bmap.c *start = first + start_list[height] + keep_start; start 1769 fs/gfs2/bmap.c __be64 *start, *end; start 1839 fs/gfs2/bmap.c end_list, end_aligned, &start, &end); start 1840 fs/gfs2/bmap.c gfs2_metapath_ra(ip->i_gl, start, end); start 1890 fs/gfs2/bmap.c &start, &end); start 1892 fs/gfs2/bmap.c start, end, start 1959 fs/gfs2/bmap.c &start, &end); start 1960 fs/gfs2/bmap.c gfs2_metapath_ra(ip->i_gl, start, end); start 2295 fs/gfs2/bmap.c ktime_t start, end; start 2297 fs/gfs2/bmap.c start = ktime_get(); start 2319 fs/gfs2/bmap.c jd->nr_extents, ktime_ms_delta(end, start)); start 438 fs/gfs2/dir.c const char *start = name->name; start 440 fs/gfs2/dir.c if (name->len == (end - start)) start 1010 fs/gfs2/dir.c u32 start, len, half_len, divider; start 1053 fs/gfs2/dir.c start = (index & ~(len - 1)); start 1070 fs/gfs2/dir.c error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64), start 1081 fs/gfs2/dir.c divider = (start + half_len) << (32 - dip->i_depth); start 1490 fs/gfs2/dir.c if (index + MAX_RA_BLOCKS < f_ra->start) start 1493 fs/gfs2/dir.c f_ra->start = max((pgoff_t)index, f_ra->start); start 1495 fs/gfs2/dir.c if (f_ra->start >= hsize) /* if exceeded the hash table */ start 1499 fs/gfs2/dir.c blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]); start 1500 fs/gfs2/dir.c f_ra->start++; start 1545 fs/gfs2/dir.c f_ra->start = 0; start 97 fs/gfs2/export.c struct file_ra_state f_ra = { .start = 0 }; start 709 fs/gfs2/file.c static int gfs2_fsync(struct file *file, loff_t start, loff_t end, start 719 fs/gfs2/file.c ret1 = filemap_fdatawrite_range(mapping, start, end); start 741 fs/gfs2/file.c ret = file_fdatawait_range(file, start, end); start 2141 fs/gfs2/glock.c .start = gfs2_glock_seq_start, start 2148 fs/gfs2/glock.c .start = gfs2_glock_seq_start, start 2155 fs/gfs2/glock.c .start = gfs2_sbstats_seq_start, start 187 fs/gfs2/glops.c filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); start 188 fs/gfs2/glops.c error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); start 220 fs/gfs2/glops.c truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); start 56 fs/gfs2/incore.h int 
(*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start, start 382 fs/gfs2/incore.h loff_t start; start 2030 fs/gfs2/inode.c u64 start, u64 len) start 2042 fs/gfs2/inode.c ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops); start 748 fs/gfs2/lops.c static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, start 763 fs/gfs2/lops.c gfs2_replay_incr_blk(jd, &start); start 765 fs/gfs2/lops.c for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { start 770 fs/gfs2/lops.c if (gfs2_revoke_check(jd, blkno, start)) start 773 fs/gfs2/lops.c error = gfs2_replay_read_block(jd, start, &bh_log); start 920 fs/gfs2/lops.c static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, start 938 fs/gfs2/lops.c for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { start 939 fs/gfs2/lops.c error = gfs2_replay_read_block(jd, start, &bh); start 949 fs/gfs2/lops.c error = gfs2_revoke_add(jd, blkno, start); start 1002 fs/gfs2/lops.c static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, start 1017 fs/gfs2/lops.c gfs2_replay_incr_blk(jd, &start); start 1018 fs/gfs2/lops.c for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { start 1024 fs/gfs2/lops.c if (gfs2_revoke_check(jd, blkno, start)) start 1027 fs/gfs2/lops.c error = gfs2_replay_read_block(jd, start, &bh_log); start 74 fs/gfs2/lops.h static inline int lops_scan_elements(struct gfs2_jdesc *jd, u32 start, start 82 fs/gfs2/lops.h error = gfs2_log_ops[x]->lo_scan_elements(jd, start, start 193 fs/gfs2/recovery.c static int foreach_descriptor(struct gfs2_jdesc *jd, u32 start, start 206 fs/gfs2/recovery.c while (start != end) { start 207 fs/gfs2/recovery.c error = gfs2_replay_read_block(jd, start, &bh); start 219 fs/gfs2/recovery.c error = get_log_header(jd, start, &lh); start 221 fs/gfs2/recovery.c gfs2_replay_incr_blk(jd, &start); start 236 fs/gfs2/recovery.c error = lops_scan_elements(jd, start, ld, ptr, pass); start 243 fs/gfs2/recovery.c gfs2_replay_incr_blk(jd, &start); start 365 fs/gfs2/rgrp.c u8 *ptr, *start, *end; start 377 fs/gfs2/rgrp.c start = bi->bi_bh->b_data; start 379 fs/gfs2/rgrp.c start = bi->bi_clone; start 380 fs/gfs2/rgrp.c start += bi->bi_offset; start 381 fs/gfs2/rgrp.c end = start + bi->bi_bytes; start 383 fs/gfs2/rgrp.c start += (rbm.offset / GFS2_NBBY); start 384 fs/gfs2/rgrp.c bytes = min_t(u32, len / GFS2_NBBY, (end - start)); start 385 fs/gfs2/rgrp.c ptr = memchr_inv(start, 0, bytes); start 386 fs/gfs2/rgrp.c chunk_size = ((ptr == NULL) ? 
bytes : (ptr - start)); start 645 fs/gfs2/rgrp.c struct gfs2_bitmap *start, *last; start 658 fs/gfs2/rgrp.c start = rbm_bi(&rs->rs_rbm); start 661 fs/gfs2/rgrp.c clear_bit(GBF_FULL, &start->bi_flags); start 662 fs/gfs2/rgrp.c while (start++ != last); start 946 fs/gfs2/rgrp.c rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK; start 1324 fs/gfs2/rgrp.c sector_t start = 0; start 1349 fs/gfs2/rgrp.c if ((start + nr_blks) != blk) { start 1352 fs/gfs2/rgrp.c start, nr_blks, start 1360 fs/gfs2/rgrp.c start = blk; start 1369 fs/gfs2/rgrp.c rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); start 1406 fs/gfs2/rgrp.c u64 start, end, minlen; start 1423 fs/gfs2/rgrp.c start = r.start >> bs_shift; start 1424 fs/gfs2/rgrp.c end = start + (r.len >> bs_shift); start 1428 fs/gfs2/rgrp.c if (end <= start || minlen > sdp->sd_max_rg_data) start 1431 fs/gfs2/rgrp.c rgd = gfs2_blk2rgrpd(sdp, start, 0); start 1435 fs/gfs2/rgrp.c && (start > rgd_end->rd_data0 + rgd_end->rd_data)) start 356 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_sbd *sdp, int start, u32 flags), start 358 fs/gfs2/trace_gfs2.h TP_ARGS(sdp, start, flags), start 362 fs/gfs2/trace_gfs2.h __field( int, start ) start 369 fs/gfs2/trace_gfs2.h __entry->start = start; start 376 fs/gfs2/trace_gfs2.h __entry->start ? "start" : "end", start 405 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start), start 407 fs/gfs2/trace_gfs2.h TP_ARGS(sdp, wbc, start), start 411 fs/gfs2/trace_gfs2.h __field( int, start ) start 418 fs/gfs2/trace_gfs2.h __entry->start = start; start 424 fs/gfs2/trace_gfs2.h MINOR(__entry->dev), __entry->start ? "start" : "end", start 556 fs/gfs2/trace_gfs2.h __field( u64, start ) start 567 fs/gfs2/trace_gfs2.h __entry->start = block; start 579 fs/gfs2/trace_gfs2.h (unsigned long long)__entry->start, start 599 fs/gfs2/trace_gfs2.h __field( u64, start ) start 611 fs/gfs2/trace_gfs2.h __entry->start = gfs2_rbm_to_block(&rs->rs_rbm); start 619 fs/gfs2/trace_gfs2.h (unsigned long long)__entry->start, start 32 fs/hfs/bitmap.c u32 mask, start, len, n; start 70 fs/hfs/bitmap.c start = (curr - bitmap) * 32 + i; start 71 fs/hfs/bitmap.c if (start >= size) start 72 fs/hfs/bitmap.c return start; start 74 fs/hfs/bitmap.c len = min(size - start, len); start 108 fs/hfs/bitmap.c *max = (curr - bitmap) * 32 + i - start; start 109 fs/hfs/bitmap.c return start; start 193 fs/hfs/bitmap.c int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count) start 203 fs/hfs/bitmap.c hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count); start 205 fs/hfs/bitmap.c if ((start + count) > HFS_SB(sb)->fs_ablocks) start 210 fs/hfs/bitmap.c curr = HFS_SB(sb)->bitmap + (start / 32); start 214 fs/hfs/bitmap.c i = start % 32; start 223 fs/hfs/extent.c u16 count, start; start 230 fs/hfs/extent.c start = be16_to_cpu(extent->block); start 231 fs/hfs/extent.c if (alloc_block != start + count) { start 251 fs/hfs/extent.c u16 count, start; start 267 fs/hfs/extent.c start = be16_to_cpu(extent->block); start 269 fs/hfs/extent.c hfs_clear_vbm_bits(sb, start, count); start 275 fs/hfs/extent.c hfs_clear_vbm_bits(sb, start + count, block_nr); start 290 fs/hfs/extent.c u32 total_blocks, blocks, start; start 323 fs/hfs/extent.c start = be16_to_cpu(fd.key->ext.FABN); start 324 fs/hfs/extent.c hfs_free_extents(sb, extent, total_blocks - start, total_blocks); start 326 fs/hfs/extent.c total_blocks = start; start 394 fs/hfs/extent.c u32 start, len, goal; start 408 fs/hfs/extent.c start = hfs_vbm_search_free(sb, goal, 
&len); start 414 fs/hfs/extent.c hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); start 419 fs/hfs/extent.c HFS_I(inode)->first_extents[0].block = cpu_to_be16(start); start 426 fs/hfs/extent.c start, len); start 438 fs/hfs/extent.c start, len); start 465 fs/hfs/extent.c HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start); start 480 fs/hfs/extent.c u16 blk_cnt, alloc_cnt, start; start 529 fs/hfs/extent.c start = HFS_I(inode)->cached_start; start 531 fs/hfs/extent.c alloc_cnt - start, alloc_cnt - blk_cnt); start 533 fs/hfs/extent.c if (blk_cnt > start) { start 537 fs/hfs/extent.c alloc_cnt = start; start 654 fs/hfs/inode.c static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end, start 661 fs/hfs/inode.c ret = file_write_and_wait_range(filp, start, end); start 33 fs/hfs/mdb.c sector_t *start, sector_t *size) start 40 fs/hfs/mdb.c *start = 0; start 48 fs/hfs/mdb.c *start = (sector_t)te.cdte_addr.lba << 2; start 57 fs/hfs/mdb.c *start = (sector_t)ms_info.addr.lba << 2; start 26 fs/hfsplus/bitmap.c u32 mask, start, len, n; start 39 fs/hfsplus/bitmap.c start = size; start 84 fs/hfsplus/bitmap.c start = size; start 94 fs/hfsplus/bitmap.c start = size; start 98 fs/hfsplus/bitmap.c start = offset + (curr - pptr) * 32 + i; start 99 fs/hfsplus/bitmap.c if (start >= size) { start 104 fs/hfsplus/bitmap.c len = min(size - start, len); start 135 fs/hfsplus/bitmap.c start = size; start 155 fs/hfsplus/bitmap.c *max = offset + (curr - pptr) * 32 + i - start; start 158 fs/hfsplus/bitmap.c hfs_dbg(BITMAP, "-> %u,%u\n", start, *max); start 161 fs/hfsplus/bitmap.c return start; start 312 fs/hfsplus/extents.c u32 count, start; start 319 fs/hfsplus/extents.c start = be32_to_cpu(extent->start_block); start 320 fs/hfsplus/extents.c if (alloc_block != start + count) { start 341 fs/hfsplus/extents.c u32 count, start; start 361 fs/hfsplus/extents.c start = be32_to_cpu(extent->start_block); start 363 fs/hfsplus/extents.c err = hfsplus_block_free(sb, start, count); start 367 fs/hfsplus/extents.c start, count); start 374 fs/hfsplus/extents.c err = hfsplus_block_free(sb, start + count, block_nr); start 378 fs/hfsplus/extents.c start, count); start 401 fs/hfsplus/extents.c u32 total_blocks, blocks, start; start 426 fs/hfsplus/extents.c start = be32_to_cpu(fd.key->ext.start_block); start 430 fs/hfsplus/extents.c hfsplus_free_extents(sb, ext_entry, total_blocks - start, start 432 fs/hfsplus/extents.c total_blocks = start; start 445 fs/hfsplus/extents.c u32 start, len, goal; start 468 fs/hfsplus/extents.c start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len); start 469 fs/hfsplus/extents.c if (start >= sbi->total_blocks) { start 470 fs/hfsplus/extents.c start = hfsplus_block_allocate(sb, goal, 0, &len); start 471 fs/hfsplus/extents.c if (start >= goal) { start 478 fs/hfsplus/extents.c res = sb_issue_zeroout(sb, start, len, GFP_NOFS); start 483 fs/hfsplus/extents.c hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); start 489 fs/hfsplus/extents.c hip->first_extents[0].start_block = cpu_to_be32(start); start 496 fs/hfsplus/extents.c start, len); start 507 fs/hfsplus/extents.c start, len); start 532 fs/hfsplus/extents.c hip->cached_extents[0].start_block = cpu_to_be32(start); start 548 fs/hfsplus/extents.c u32 alloc_cnt, blk_cnt, start; start 604 fs/hfsplus/extents.c start = hip->cached_start; start 606 fs/hfsplus/extents.c alloc_cnt - start, alloc_cnt - blk_cnt); start 608 fs/hfsplus/extents.c if (blk_cnt > start) { start 612 fs/hfsplus/extents.c alloc_cnt = start; 
start 493 fs/hfsplus/hfsplus_fs.h int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, start 293 fs/hfsplus/inode.c int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, start 301 fs/hfsplus/inode.c error = file_write_and_wait_range(file, start, end); start 54 fs/hfsplus/wrapper.c loff_t start; start 63 fs/hfsplus/wrapper.c start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT; start 64 fs/hfsplus/wrapper.c offset = start & (io_size - 1); start 128 fs/hfsplus/wrapper.c sector_t *start, sector_t *size) start 135 fs/hfsplus/wrapper.c *start = 0; start 144 fs/hfsplus/wrapper.c *start = (sector_t)te.cdte_addr.lba << 2; start 154 fs/hfsplus/wrapper.c *start = (sector_t)ms_info.addr.lba << 2; start 365 fs/hostfs/hostfs_kern.c static int hostfs_fsync(struct file *file, loff_t start, loff_t end, start 371 fs/hostfs/hostfs_kern.c ret = file_write_and_wait_range(file, start, end); start 439 fs/hostfs/hostfs_kern.c loff_t start = page_offset(page); start 443 fs/hostfs/hostfs_kern.c bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, start 100 fs/hpfs/alloc.c int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg) start 102 fs/hpfs/alloc.c if (start + len < start || start < 0x12 || start 103 fs/hpfs/alloc.c start + len > hpfs_sb(s)->sb_fs_size) { start 104 fs/hpfs/alloc.c hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start); start 110 fs/hpfs/alloc.c if (chk_if_allocated(s, start + i, msg)) return 1; start 503 fs/hpfs/alloc.c static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result) start 509 fs/hpfs/alloc.c end = start + len; start 510 fs/hpfs/alloc.c if (start < limit_start) start 511 fs/hpfs/alloc.c start = limit_start; start 514 fs/hpfs/alloc.c if (start >= end) start 516 fs/hpfs/alloc.c if (end - start < minlen) start 518 fs/hpfs/alloc.c err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0); start 521 fs/hpfs/alloc.c *result += end - start; start 525 fs/hpfs/alloc.c int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result) start 536 fs/hpfs/alloc.c if (start >= sbi->sb_fs_size) start 540 fs/hpfs/alloc.c if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) { start 552 fs/hpfs/alloc.c err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result); start 559 fs/hpfs/alloc.c start_bmp = start >> 14; start 573 fs/hpfs/alloc.c err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result); start 23 fs/hpfs/file.c int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 28 fs/hpfs/file.c ret = file_write_and_wait_range(file, start, end); start 193 fs/hpfs/file.c static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) start 195 fs/hpfs/file.c return generic_block_fiemap(inode, fieinfo, start, len, hpfs_get_block); start 216 fs/hpfs/super.c r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed); start 356 fs/hugetlbfs/inode.c hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end) start 364 fs/hugetlbfs/inode.c vma_interval_tree_foreach(vma, root, start, end ? 
end : ULONG_MAX) { start 374 fs/hugetlbfs/inode.c if (vma->vm_pgoff < start) start 375 fs/hugetlbfs/inode.c v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT; start 419 fs/hugetlbfs/inode.c const pgoff_t start = lstart >> huge_page_shift(h); start 430 fs/hugetlbfs/inode.c next = start; start 492 fs/hugetlbfs/inode.c (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); start 581 fs/hugetlbfs/inode.c pgoff_t start, index, end; start 596 fs/hugetlbfs/inode.c start = offset >> hpage_shift; start 620 fs/hugetlbfs/inode.c for (index = start; index < end; index++) { start 1279 fs/io_uring.c off_t start, end; start 1288 fs/io_uring.c start = al->io_start & PAGE_MASK; start 1290 fs/io_uring.c if (kiocb->ki_pos >= start && kiocb->ki_pos <= end) start 3466 fs/io_uring.c unsigned long off, start, end, ubuf; start 3490 fs/io_uring.c start = ubuf >> PAGE_SHIFT; start 3491 fs/io_uring.c nr_pages = end - start; start 157 fs/ioctl.c u64 start, u64 len, u64 *new_len) start 166 fs/ioctl.c if (start > maxbytes) start 172 fs/ioctl.c if (len > maxbytes || (maxbytes - len) < start) start 173 fs/ioctl.c *new_len = maxbytes - start; start 291 fs/ioctl.c struct fiemap_extent_info *fieinfo, loff_t start, start 323 fs/ioctl.c start_blk = logical_to_blk(inode, start); start 324 fs/ioctl.c last_blk = logical_to_blk(inode, start + len - 1); start 450 fs/ioctl.c struct fiemap_extent_info *fieinfo, u64 start, start 455 fs/ioctl.c ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block); start 402 fs/iomap/direct-io.c loff_t pos = iocb->ki_pos, start = pos; start 456 fs/iomap/direct-io.c if (filemap_range_has_page(mapping, start, end)) { start 463 fs/iomap/direct-io.c ret = filemap_write_and_wait_range(mapping, start, end); start 474 fs/iomap/direct-io.c start >> PAGE_SHIFT, end >> PAGE_SHIFT); start 68 fs/iomap/fiemap.c loff_t start, loff_t len, const struct iomap_ops *ops) start 88 fs/iomap/fiemap.c ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx, start 98 fs/iomap/fiemap.c start += ret; start 973 fs/jbd2/journal.c int start; start 1030 fs/jbd2/journal.c .start = jbd2_seq_info_start, start 1113 fs/jbd2/journal.c unsigned long long start, int len, int blocksize) start 1158 fs/jbd2/journal.c journal->j_blk_offset = start; start 1167 fs/jbd2/journal.c bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); start 1210 fs/jbd2/journal.c unsigned long long start, int len, int blocksize) start 1214 fs/jbd2/journal.c journal = journal_init_common(bdev, fs_dev, start, len, blocksize); start 67 fs/jbd2/recovery.c static int do_readahead(journal_t *journal, unsigned int start) start 77 fs/jbd2/recovery.c max = start + (128 * 1024 / journal->j_blocksize); start 86 fs/jbd2/recovery.c for (next = start; next < max; next++) { start 32 fs/jffs2/file.c int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) start 38 fs/jffs2/file.c ret = file_write_and_wait_range(filp, start, end); start 246 fs/jffs2/file.c unsigned start = pos & (PAGE_SIZE - 1); start 247 fs/jffs2/file.c unsigned end = start + copied; start 248 fs/jffs2/file.c unsigned aligned_start = start & ~3; start 254 fs/jffs2/file.c start, end, pg->flags); start 304 fs/jffs2/file.c writtenlen -= min(writtenlen, (start - aligned_start)); start 317 fs/jffs2/file.c if (start+writtenlen < end) { start 36 fs/jffs2/gc.c uint32_t start, uint32_t end); start 39 fs/jffs2/gc.c uint32_t start, uint32_t end); start 503 fs/jffs2/gc.c uint32_t start = 0, end = 0, nrfrags = 0; start 539 fs/jffs2/gc.c start = frag->ofs; start 555 
fs/jffs2/gc.c if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) { start 557 fs/jffs2/gc.c ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); start 560 fs/jffs2/gc.c ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end); start 1015 fs/jffs2/gc.c uint32_t start, uint32_t end) start 1024 fs/jffs2/gc.c f->inocache->ino, start, end); start 1058 fs/jffs2/gc.c start, end, f->inocache->ino); start 1065 fs/jffs2/gc.c start, end, f->inocache->ino); start 1077 fs/jffs2/gc.c ri.offset = cpu_to_je32(start); start 1078 fs/jffs2/gc.c ri.dsize = cpu_to_je32(end - start); start 1166 fs/jffs2/gc.c uint32_t start, uint32_t end) start 1180 fs/jffs2/gc.c f->inocache->ino, start, end); start 1183 fs/jffs2/gc.c orig_start = start; start 1196 fs/jffs2/gc.c min = start & ~(PAGE_SIZE-1); start 1199 fs/jffs2/gc.c frag = jffs2_lookup_node_frag(&f->fragtree, start); start 1203 fs/jffs2/gc.c BUG_ON(frag->ofs != start); start 1213 fs/jffs2/gc.c start = frag->ofs; start 1237 fs/jffs2/gc.c start = frag->ofs; start 1252 fs/jffs2/gc.c start = frag->ofs; start 1313 fs/jffs2/gc.c orig_start, orig_end, start, end); start 1317 fs/jffs2/gc.c BUG_ON(start > orig_start); start 1329 fs/jffs2/gc.c page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT, start 1341 fs/jffs2/gc.c offset = start; start 283 fs/jffs2/wbuf.c uint32_t start, end, ofs, len; start 324 fs/jffs2/wbuf.c start = ref_offset(first_raw); start 333 fs/jffs2/wbuf.c start, end, end - start, nr_refile); start 336 fs/jffs2/wbuf.c if (start < c->wbuf_ofs) { start 340 fs/jffs2/wbuf.c buf = kmalloc(end - start, GFP_KERNEL); start 348 fs/jffs2/wbuf.c ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, start 353 fs/jffs2/wbuf.c (retlen == c->wbuf_ofs - start)) start 356 fs/jffs2/wbuf.c if (ret || retlen != c->wbuf_ofs - start) { start 376 fs/jffs2/wbuf.c start = ref_offset(first_raw); start 378 fs/jffs2/wbuf.c start, end, end - start, nr_refile); start 382 fs/jffs2/wbuf.c memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); start 389 fs/jffs2/wbuf.c ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); start 408 fs/jffs2/wbuf.c if (end-start >= c->wbuf_pagesize) { start 415 fs/jffs2/wbuf.c uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); start 444 fs/jffs2/wbuf.c c->wbuf_len = (end - start) - towrite; start 451 fs/jffs2/wbuf.c memcpy(c->wbuf, buf, end-start); start 453 fs/jffs2/wbuf.c memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); start 456 fs/jffs2/wbuf.c c->wbuf_len = end - start; start 518 fs/jffs2/wbuf.c (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); start 19 fs/jfs/file.c int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 24 fs/jfs/file.c rc = file_write_and_wait_range(file, start, end); start 71 fs/jfs/jfs_discard.c u64 start, end, minlen; start 80 fs/jfs/jfs_discard.c start = range->start >> sb->s_blocksize_bits; start 81 fs/jfs/jfs_discard.c end = start + (range->len >> sb->s_blocksize_bits) - 1; start 87 fs/jfs/jfs_discard.c start >= bmp->db_mapsize || start 97 fs/jfs/jfs_discard.c agno = BLKTOAG(start, JFS_SBI(ip->i_sb)); start 1275 fs/jfs/jfs_dmap.c blkno = le64_to_cpu(dp->start) + (word << L2DBWORD); start 2003 fs/jfs/jfs_dmap.c blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD); start 3561 fs/jfs/jfs_dmap.c agno = le64_to_cpu(dp->start) >> l2agsize; start 3743 fs/jfs/jfs_dmap.c dp->start = cpu_to_le64(Blkno); start 150 fs/jfs/jfs_dmap.h __le64 start; /* 8: starting blkno for this dmap */ start 2686 fs/jfs/jfs_imap.c static int 
diFindFree(u32 word, int start) start 2689 fs/jfs/jfs_imap.c assert(start < 32); start 2691 fs/jfs/jfs_imap.c for (word <<= start, bitno = start; bitno < 32; start 14 fs/jfs/jfs_unicode.h wchar_t start; start 116 fs/jfs/jfs_unicode.h while (rp->start) { start 117 fs/jfs/jfs_unicode.h if (uc < rp->start) /* Before start of range */ start 120 fs/jfs/jfs_unicode.h return uc + rp->table[uc - rp->start]; start 171 fs/kernfs/file.c .start = kernfs_seq_start, start 995 fs/libfs.c int __generic_file_fsync(struct file *file, loff_t start, loff_t end, start 1002 fs/libfs.c err = file_write_and_wait_range(file, start, end); start 1037 fs/libfs.c int generic_file_fsync(struct file *file, loff_t start, loff_t end, start 1043 fs/libfs.c err = __generic_file_fsync(file, start, end, datasync); start 1082 fs/libfs.c int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 118 fs/lockd/xdr.c s32 start, len, end; start 131 fs/lockd/xdr.c start = ntohl(*p++); start 133 fs/lockd/xdr.c end = start + len - 1; start 135 fs/lockd/xdr.c fl->fl_start = s32_to_loff_t(start); start 150 fs/lockd/xdr.c s32 start, len; start 166 fs/lockd/xdr.c start = loff_t_to_s32(fl->fl_start); start 172 fs/lockd/xdr.c *p++ = htonl(start); start 110 fs/lockd/xdr4.c __u64 len, start; start 123 fs/lockd/xdr4.c p = xdr_decode_hyper(p, &start); start 125 fs/lockd/xdr4.c end = start + len - 1; start 127 fs/lockd/xdr4.c fl->fl_start = s64_to_loff_t(start); start 142 fs/lockd/xdr4.c s64 start, len; start 159 fs/lockd/xdr4.c start = loff_t_to_s64(fl->fl_start); start 165 fs/lockd/xdr4.c p = xdr_encode_hyper(p, start); start 1444 fs/locks.c int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start, start 1458 fs/locks.c fl.fl_start = start; start 2984 fs/locks.c .start = locks_start, start 1292 fs/namespace.c .start = m_start, start 142 fs/nfs/blocklayout/blocklayout.c return offset >= map->start && offset < map->start + map->len; start 170 fs/nfs/blocklayout/blocklayout.c disk_addr -= map->start; start 174 fs/nfs/blocklayout/blocklayout.c if (end >= map->start + map->len) start 175 fs/nfs/blocklayout/blocklayout.c *len = map->start + map->len - disk_addr; start 256 fs/nfs/blocklayout/blocklayout.c struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; start 313 fs/nfs/blocklayout/blocklayout.c map.start = NFS4_MAX_UINT64; start 374 fs/nfs/blocklayout/blocklayout.c u64 start = hdr->args.offset & (loff_t)PAGE_MASK; start 379 fs/nfs/blocklayout/blocklayout.c ext_tree_mark_written(bl, start >> SECTOR_SHIFT, start 380 fs/nfs/blocklayout/blocklayout.c (end - start) >> SECTOR_SHIFT, lwb); start 401 fs/nfs/blocklayout/blocklayout.c struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; start 521 fs/nfs/blocklayout/blocklayout.c u64 start; /* Expected start of next non-COW extent */ start 536 fs/nfs/blocklayout/blocklayout.c if (be->be_f_offset != lv->start) start 538 fs/nfs/blocklayout/blocklayout.c lv->start += be->be_length; start 543 fs/nfs/blocklayout/blocklayout.c if (be->be_f_offset != lv->start) start 545 fs/nfs/blocklayout/blocklayout.c if (lv->cowread > lv->start) start 547 fs/nfs/blocklayout/blocklayout.c lv->start += be->be_length; start 548 fs/nfs/blocklayout/blocklayout.c lv->inval = lv->start; start 551 fs/nfs/blocklayout/blocklayout.c if (be->be_f_offset != lv->start) start 553 fs/nfs/blocklayout/blocklayout.c lv->start += be->be_length; start 556 fs/nfs/blocklayout/blocklayout.c if (be->be_f_offset > lv->start) start 591 fs/nfs/blocklayout/blocklayout.c unsigned long start, end; 
start 602 fs/nfs/blocklayout/blocklayout.c start = end - PNFS_DEVICE_RETRY_TIMEOUT; start 603 fs/nfs/blocklayout/blocklayout.c if (!time_in_range(node->timestamp_unavailable, start, end)) { start 673 fs/nfs/blocklayout/blocklayout.c .start = lgr->range.offset >> SECTOR_SHIFT, start 721 fs/nfs/blocklayout/blocklayout.c lv.start << SECTOR_SHIFT) { start 727 fs/nfs/blocklayout/blocklayout.c if (lv.start < lv.cowread) { start 71 fs/nfs/blocklayout/blocklayout.h u64 start; start 95 fs/nfs/blocklayout/blocklayout.h u64 start; start 104 fs/nfs/blocklayout/blocklayout.h u64 start; start 182 fs/nfs/blocklayout/blocklayout.h int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start, start 184 fs/nfs/blocklayout/blocklayout.h int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start, start 101 fs/nfs/blocklayout/dev.c p = xdr_decode_hyper(p, &b->slice.start); start 169 fs/nfs/blocklayout/dev.c map->start = dev->start; start 184 fs/nfs/blocklayout/dev.c if (child->start > offset || start 185 fs/nfs/blocklayout/dev.c child->start + child->len <= offset) start 188 fs/nfs/blocklayout/dev.c child->map(child, offset - child->start, map); start 223 fs/nfs/blocklayout/dev.c map->start += offset; start 411 fs/nfs/blocklayout/dev.c d->disk_offset = v->slice.start; start 436 fs/nfs/blocklayout/dev.c d->children[i].start += len; start 46 fs/nfs/blocklayout/extent_tree.c __ext_tree_search(struct rb_root *root, sector_t start) start 53 fs/nfs/blocklayout/extent_tree.c if (start < be->be_f_offset) start 55 fs/nfs/blocklayout/extent_tree.c else if (start >= ext_f_end(be)) start 62 fs/nfs/blocklayout/extent_tree.c if (start < be->be_f_offset) start 65 fs/nfs/blocklayout/extent_tree.c if (start >= ext_f_end(be)) start 178 fs/nfs/blocklayout/extent_tree.c sector_t start, sector_t end, struct list_head *tmp) start 185 fs/nfs/blocklayout/extent_tree.c be = __ext_tree_search(root, start); start 194 fs/nfs/blocklayout/extent_tree.c if (start > be->be_f_offset) start 195 fs/nfs/blocklayout/extent_tree.c len1 = start - be->be_f_offset; start 361 fs/nfs/blocklayout/extent_tree.c sector_t start, sector_t end) start 367 fs/nfs/blocklayout/extent_tree.c err = __ext_tree_remove(&bl->bl_ext_ro, start, end, &tmp); start 369 fs/nfs/blocklayout/extent_tree.c err2 = __ext_tree_remove(&bl->bl_ext_rw, start, end, &tmp); start 405 fs/nfs/blocklayout/extent_tree.c ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start, start 409 fs/nfs/blocklayout/extent_tree.c sector_t end = start + len; start 418 fs/nfs/blocklayout/extent_tree.c err = __ext_tree_remove(&bl->bl_ext_ro, start, end, &tmp); start 425 fs/nfs/blocklayout/extent_tree.c for (be = __ext_tree_search(root, start); be; be = ext_tree_next(be)) { start 432 fs/nfs/blocklayout/extent_tree.c if (be->be_f_offset < start) { start 436 fs/nfs/blocklayout/extent_tree.c sector_t diff = start - be->be_f_offset; start 444 fs/nfs/blocklayout/extent_tree.c err = ext_tree_split(root, be, start); start 468 fs/nfs/blocklayout/extent_tree.c if (be->be_f_offset >= start && ext_f_end(be) <= end) { start 1101 fs/nfs/client.c .start = nfs_server_list_start, start 1113 fs/nfs/client.c .start = nfs_volume_list_start, start 960 fs/nfs/dir.c static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end, start 231 fs/nfs/file.c nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 239 fs/nfs/file.c ret = file_write_and_wait_range(file, start, end); start 250 fs/nfs/file.c start = 0; start 333 fs/nfs/file.c start: start 349 fs/nfs/file.c 
goto start; start 617 fs/nfs/flexfilelayout/flexfilelayout.c ktime_t start; start 622 fs/nfs/flexfilelayout/flexfilelayout.c start = timer->start_time; start 624 fs/nfs/flexfilelayout/flexfilelayout.c return ktime_sub(now, start); start 2015 fs/nfs/flexfilelayout/flexfilelayout.c __be32 *start; start 2017 fs/nfs/flexfilelayout/flexfilelayout.c start = xdr_reserve_space(xdr, 4); start 2018 fs/nfs/flexfilelayout/flexfilelayout.c if (unlikely(!start)) start 2021 fs/nfs/flexfilelayout/flexfilelayout.c *start = cpu_to_be32(ff_args->num_errors); start 2120 fs/nfs/flexfilelayout/flexfilelayout.c __be32 *start; start 2129 fs/nfs/flexfilelayout/flexfilelayout.c start = xdr_reserve_space(xdr, 4); start 2130 fs/nfs/flexfilelayout/flexfilelayout.c *start = cpu_to_be32(tmp_buf.len); start 2392 fs/nfs/flexfilelayout/flexfilelayout.c __be32 *start; start 2395 fs/nfs/flexfilelayout/flexfilelayout.c start = xdr_reserve_space(xdr, 4); start 2398 fs/nfs/flexfilelayout/flexfilelayout.c *start = cpu_to_be32((xdr->p - start - 1) * 4); start 365 fs/nfs/internal.h int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); start 309 fs/nfs/nfs4proc.c __be32 *start, *p; start 329 fs/nfs/nfs4proc.c start = p = kmap_atomic(*readdir->pages); start 357 fs/nfs/nfs4proc.c readdir->pgbase = (char *)p - (char *)start; start 359 fs/nfs/nfs4proc.c kunmap_atomic(start); start 813 fs/nfs/nfs4trace.h __field(loff_t, start) start 828 fs/nfs/nfs4trace.h __entry->start = request->fl_start; start 847 fs/nfs/nfs4trace.h (long long)__entry->start, start 883 fs/nfs/nfs4trace.h __field(loff_t, start) start 900 fs/nfs/nfs4trace.h __entry->start = request->fl_start; start 923 fs/nfs/nfs4trace.h (long long)__entry->start, start 477 fs/nfs/pnfs.c unsigned long start, end; start 483 fs/nfs/pnfs.c start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT; start 484 fs/nfs/pnfs.c if (!time_in_range(lo->plh_retry_timestamp, start, end)) { start 588 fs/nfs/pnfs.h pnfs_end_offset(u64 start, u64 len) start 590 fs/nfs/pnfs.h if (NFS4_MAX_UINT64 - start <= len) start 592 fs/nfs/pnfs.h return start + len; start 310 fs/nfs/pnfs_dev.c unsigned long start, end; start 313 fs/nfs/pnfs_dev.c start = end - PNFS_DEVICE_RETRY_TIMEOUT; start 314 fs/nfs/pnfs_dev.c if (time_in_range(node->timestamp_unavailable, start, end)) start 659 fs/nfs/proc.c __s32 start, end; start 661 fs/nfs/proc.c start = (__s32)fl->fl_start; start 662 fs/nfs/proc.c if ((loff_t)start != fl->fl_start) start 672 fs/nfs/proc.c if (start < 0 || start > end) start 169 fs/nfs/read.c unsigned long start = req->wb_pgbase; start 179 fs/nfs/read.c zero_user_segment(page, start, end); start 184 fs/nfs/read.c start += hdr->good_bytes - bytes; start 185 fs/nfs/read.c WARN_ON(start < req->wb_pgbase); start 186 fs/nfs/read.c zero_user_segment(page, start, end); start 1234 fs/nfsd/export.c .start = cache_seq_start_rcu, start 375 fs/nfsd/nfs4acl.c sort_pacl_range(struct posix_acl *pacl, int start, int end) { start 382 fs/nfsd/nfs4acl.c for (i = start; i < end; i++) { start 2496 fs/nfsd/nfs4state.c .start = states_start, start 6058 fs/nfsd/nfs4state.c end_offset(u64 start, u64 len) start 6062 fs/nfsd/nfs4state.c end = start + len; start 6063 fs/nfsd/nfs4state.c return end >= start ? end: NFS4_MAX_UINT64; start 6068 fs/nfsd/nfs4state.c last_byte_offset(u64 start, u64 len) start 6073 fs/nfsd/nfs4state.c end = start + len; start 6074 fs/nfsd/nfs4state.c return end > start ? 
end - 1: NFS4_MAX_UINT64; start 197 fs/nilfs2/bmap.c int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) start 202 fs/nilfs2/bmap.c ret = bmap->b_ops->bop_seek_key(bmap, start, keyp); start 151 fs/nilfs2/bmap.h int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp); start 1600 fs/nilfs2/btree.c static int nilfs_btree_seek_key(const struct nilfs_bmap *btree, __u64 start, start 1611 fs/nilfs2/btree.c ret = nilfs_btree_do_lookup(btree, path, start, NULL, minlevel, 0); start 1613 fs/nilfs2/btree.c *keyp = start; start 166 fs/nilfs2/cpfile.c unsigned long start, end, blkoff; start 172 fs/nilfs2/cpfile.c start = nilfs_cpfile_get_blkoff(cpfile, start_cno); start 175 fs/nilfs2/cpfile.c ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp); start 177 fs/nilfs2/cpfile.c *cnop = (blkoff == start) ? start_cno : start 312 fs/nilfs2/cpfile.c __u64 start, start 324 fs/nilfs2/cpfile.c if (unlikely(start == 0 || start > end)) { start 327 fs/nilfs2/cpfile.c (unsigned long long)start, (unsigned long long)end); start 339 fs/nilfs2/cpfile.c for (cno = start; cno < end; cno += ncps) { start 176 fs/nilfs2/dat.c __u64 start, end; start 183 fs/nilfs2/dat.c end = start = le64_to_cpu(entry->de_start); start 186 fs/nilfs2/dat.c WARN_ON(start > end); start 201 fs/nilfs2/dat.c __u64 start; start 208 fs/nilfs2/dat.c start = le64_to_cpu(entry->de_start); start 212 fs/nilfs2/dat.c if (start == nilfs_mdt_cno(dat) && blocknr == 0) start 331 fs/nilfs2/dir.c unsigned long start, n; start 343 fs/nilfs2/dir.c start = ei->i_dir_start_lookup; start 344 fs/nilfs2/dir.c if (start >= npages) start 345 fs/nilfs2/dir.c start = 0; start 346 fs/nilfs2/dir.c n = start; start 378 fs/nilfs2/dir.c } while (n != start); start 164 fs/nilfs2/direct.c static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start, start 169 fs/nilfs2/direct.c for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) { start 16 fs/nilfs2/file.c int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) start 33 fs/nilfs2/file.c start, end); start 281 fs/nilfs2/inode.c unsigned int start = pos & (PAGE_SIZE - 1); start 285 fs/nilfs2/inode.c nr_dirty = nilfs_page_count_clean_buffers(page, start, start 286 fs/nilfs2/inode.c start + copied); start 996 fs/nilfs2/inode.c __u64 start, __u64 len) start 1016 fs/nilfs2/inode.c blkoff = start >> blkbits; start 1017 fs/nilfs2/inode.c end_blkoff = (start + len - 1) >> blkbits; start 284 fs/nilfs2/mdt.c int nilfs_mdt_find_block(struct inode *inode, unsigned long start, start 291 fs/nilfs2/mdt.c if (unlikely(start > end)) start 294 fs/nilfs2/mdt.c ret = nilfs_mdt_read_block(inode, start, true, out_bh); start 296 fs/nilfs2/mdt.c *blkoff = start; start 299 fs/nilfs2/mdt.c if (unlikely(ret != -ENOENT || start == ULONG_MAX)) start 302 fs/nilfs2/mdt.c ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next); start 73 fs/nilfs2/mdt.h int nilfs_mdt_find_block(struct inode *inode, unsigned long start, start 279 fs/nilfs2/nilfs.h __u64 start, __u64 len); start 95 fs/nilfs2/recovery.c sector_t start, unsigned long nblock) start 110 fs/nilfs2/recovery.c bh = __bread(nilfs->ns_bdev, ++start, blocksize); start 23 fs/nilfs2/segbuf.c int start, end; /* The region to be submitted */ start 364 fs/nilfs2/segbuf.c wi->rest_blocks -= wi->end - wi->start; start 366 fs/nilfs2/segbuf.c wi->start = wi->end; start 383 fs/nilfs2/segbuf.c static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start, start 396 fs/nilfs2/segbuf.c start << 
(nilfs->ns_blocksize_bits - 9); start 408 fs/nilfs2/segbuf.c wi->start = wi->end = 0; start 685 fs/nilfs2/segment.c loff_t start, loff_t end) start 693 fs/nilfs2/segment.c if (unlikely(start != 0 || end != LLONG_MAX)) { start 699 fs/nilfs2/segment.c index = start >> PAGE_SHIFT; start 2268 fs/nilfs2/segment.c loff_t start, loff_t end) start 2300 fs/nilfs2/segment.c sci->sc_dsync_start = start; start 267 fs/nilfs2/sufile.c int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) start 276 fs/nilfs2/sufile.c if (start <= end && end < nsegs) { start 277 fs/nilfs2/sufile.c sui->allocmin = start; start 641 fs/nilfs2/sufile.c __u64 start, __u64 end) start 659 fs/nilfs2/sufile.c if (start > end || start >= nsegs) start 669 fs/nilfs2/sufile.c for (segnum = start; segnum <= end; segnum += n) { start 1025 fs/nilfs2/sufile.c sector_t start = 0, nblocks = 0; start 1036 fs/nilfs2/sufile.c if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits) start 1039 fs/nilfs2/sufile.c start_block = (range->start + nilfs->ns_blocksize - 1) >> start 1083 fs/nilfs2/sufile.c start = seg_start; start 1088 fs/nilfs2/sufile.c if (start + nblocks == seg_start) { start 1095 fs/nilfs2/sufile.c if (start < start_block) { start 1096 fs/nilfs2/sufile.c nblocks -= start_block - start; start 1097 fs/nilfs2/sufile.c start = start_block; start 1104 fs/nilfs2/sufile.c start * sects_per_block, start 1119 fs/nilfs2/sufile.c start = seg_start; start 1129 fs/nilfs2/sufile.c if (start < start_block) { start 1130 fs/nilfs2/sufile.c nblocks -= start_block - start; start 1131 fs/nilfs2/sufile.c start = start_block; start 1133 fs/nilfs2/sufile.c if (start + nblocks > end_block + 1) start 1134 fs/nilfs2/sufile.c nblocks = end_block - start + 1; start 1138 fs/nilfs2/sufile.c start * sects_per_block, start 25 fs/nilfs2/sufile.h int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end); start 661 fs/nilfs2/the_nilfs.c sector_t start = 0, nblocks = 0; start 672 fs/nilfs2/the_nilfs.c start = seg_start; start 674 fs/nilfs2/the_nilfs.c } else if (start + nblocks == seg_start) { start 678 fs/nilfs2/the_nilfs.c start * sects_per_block, start 688 fs/nilfs2/the_nilfs.c start * sects_per_block, start 339 fs/notify/fanotify/fanotify_user.c char __user *start; start 343 fs/notify/fanotify/fanotify_user.c start = buf; start 365 fs/notify/fanotify/fanotify_user.c if (start != buf) start 409 fs/notify/fanotify/fanotify_user.c if (start != buf && ret != -EFAULT) start 410 fs/notify/fanotify/fanotify_user.c ret = buf - start; start 217 fs/notify/inotify/inotify_user.c char __user *start; start 221 fs/notify/inotify/inotify_user.c start = buf; start 252 fs/notify/inotify/inotify_user.c if (start != buf) start 259 fs/notify/inotify/inotify_user.c if (start != buf && ret != -EFAULT) start 260 fs/notify/inotify/inotify_user.c ret = buf - start; start 1894 fs/ntfs/attrib.c s64 ll, allocated_size, start = data_start; start 1919 fs/ntfs/attrib.c (unsigned long long)start); start 1927 fs/ntfs/attrib.c if (start > 0) start 1928 fs/ntfs/attrib.c start &= ~(s64)vol->cluster_size_mask; start 1940 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) { start 2080 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 2095 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) { start 2150 fs/ntfs/attrib.c if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA || start 2173 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 2211 fs/ntfs/attrib.c if (start < 0 || start >= 
allocated_size) start 2224 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 2256 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 2278 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 2292 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 2362 fs/ntfs/attrib.c if (start < 0 || start >= allocated_size) start 1486 fs/ntfs/dir.c static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, start 1495 fs/ntfs/dir.c err = file_write_and_wait_range(filp, start, end); start 1970 fs/ntfs/file.c static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end, start 1978 fs/ntfs/file.c err = file_write_and_wait_range(filp, start, end); start 609 fs/ntfs/runlist.c bool start; start 614 fs/ntfs/runlist.c start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) || /* End of file */ start 628 fs/ntfs/runlist.c ntfs_debug("start = %i, finish = %i", start, finish); start 631 fs/ntfs/runlist.c if (start) { start 1631 fs/ntfs/runlist.c const VCN start, const s64 length) start 1633 fs/ntfs/runlist.c const VCN end = start + length; start 1640 fs/ntfs/runlist.c (long long)start, (long long)length); start 1642 fs/ntfs/runlist.c BUG_ON(start < 0); start 1647 fs/ntfs/runlist.c if (likely(!start && !length)) start 1652 fs/ntfs/runlist.c while (likely(rl->length && start >= rl[1].vcn)) start 1731 fs/ntfs/runlist.c if (start == rl->vcn) { start 1793 fs/ntfs/runlist.c rl->length = start - rl->vcn; start 1800 fs/ntfs/runlist.c rl->vcn = start; start 1801 fs/ntfs/runlist.c rl->length = rl[1].vcn - start; start 1822 fs/ntfs/runlist.c rl->length = start - rl->vcn; start 1824 fs/ntfs/runlist.c rl->vcn = start; start 1839 fs/ntfs/runlist.c rl->length = start - rl->vcn; start 1846 fs/ntfs/runlist.c delta = rl->vcn - start; start 1847 fs/ntfs/runlist.c rl->vcn = start; start 1876 fs/ntfs/runlist.c rl->length = start - rl->vcn; start 1878 fs/ntfs/runlist.c rl->vcn = start; start 84 fs/ntfs/runlist.h const VCN start, const s64 length); start 577 fs/ocfs2/alloc.c int i, start = 0, depth = 0; start 581 fs/ocfs2/alloc.c start = 1; start 583 fs/ocfs2/alloc.c for(i = start; i < path_num_items(path); i++) { start 2370 fs/ocfs2/alloc.c int ret, start, orig_credits = handle->h_buffer_credits; start 2464 fs/ocfs2/alloc.c start = ocfs2_find_subtree_root(et, left_path, right_path); start 2466 fs/ocfs2/alloc.c trace_ocfs2_rotate_subtree(start, start 2468 fs/ocfs2/alloc.c right_path->p_node[start].bh->b_blocknr, start 2471 fs/ocfs2/alloc.c ret = ocfs2_extend_rotate_transaction(handle, start, start 2479 fs/ocfs2/alloc.c right_path, start); start 6843 fs/ocfs2/alloc.c static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, start 6861 fs/ocfs2/alloc.c from = start & (PAGE_SIZE - 1); start 6871 fs/ocfs2/alloc.c start = (page->index + 1) << PAGE_SHIFT; start 6878 fs/ocfs2/alloc.c int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, start 6886 fs/ocfs2/alloc.c BUG_ON(start > end); start 6890 fs/ocfs2/alloc.c index = start >> PAGE_SHIFT; start 6915 fs/ocfs2/alloc.c static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, start 6920 fs/ocfs2/alloc.c BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != start 6923 fs/ocfs2/alloc.c return ocfs2_grab_pages(inode, start, end, pages, num); start 7255 fs/ocfs2/alloc.c start: start 7322 fs/ocfs2/alloc.c goto start; start 7378 fs/ocfs2/alloc.c goto start; start 7397 fs/ocfs2/alloc.c unsigned int start, unsigned int end, int trunc) start 7407 fs/ocfs2/alloc.c if (start >= 
i_size_read(inode)) start 7413 fs/ocfs2/alloc.c BUG_ON(start > end); start 7442 fs/ocfs2/alloc.c numbytes = end - start; start 7443 fs/ocfs2/alloc.c memset(idata->id_data + start, 0, numbytes); start 7452 fs/ocfs2/alloc.c i_size_write(inode, start); start 7453 fs/ocfs2/alloc.c di->i_size = cpu_to_le64(start); start 7474 fs/ocfs2/alloc.c u64 group, u32 start, u32 count) start 7480 fs/ocfs2/alloc.c discard = ocfs2_clusters_to_blocks(sb, start); start 7500 fs/ocfs2/alloc.c u32 start, u32 max, u32 minbits) start 7509 fs/ocfs2/alloc.c start, max, minbits); start 7511 fs/ocfs2/alloc.c while (start < max) { start 7512 fs/ocfs2/alloc.c start = ocfs2_find_next_zero_bit(bitmap, max, start); start 7513 fs/ocfs2/alloc.c if (start >= max) start 7515 fs/ocfs2/alloc.c next = ocfs2_find_next_bit(bitmap, max, start); start 7517 fs/ocfs2/alloc.c if ((next - start) >= minbits) { start 7519 fs/ocfs2/alloc.c start, next - start); start 7524 fs/ocfs2/alloc.c count += next - start; start 7526 fs/ocfs2/alloc.c start = next + 1; start 7547 fs/ocfs2/alloc.c u64 start, len, trimmed = 0, first_group, last_group = 0, group = 0; start 7556 fs/ocfs2/alloc.c start = range->start >> osb->s_clustersize_bits; start 7563 fs/ocfs2/alloc.c trace_ocfs2_trim_mainbm(start, len, minlen); start 7588 fs/ocfs2/alloc.c if (start >= le32_to_cpu(main_bm->i_clusters)) { start 7593 fs/ocfs2/alloc.c if (start + len > le32_to_cpu(main_bm->i_clusters)) start 7594 fs/ocfs2/alloc.c len = le32_to_cpu(main_bm->i_clusters) - start; start 7600 fs/ocfs2/alloc.c first_group = ocfs2_which_cluster_group(main_bm_inode, start); start 7602 fs/ocfs2/alloc.c first_bit = start; start 7604 fs/ocfs2/alloc.c first_bit = start - ocfs2_blocks_to_clusters(sb, start 7607 fs/ocfs2/alloc.c start + len - 1); start 7673 fs/ocfs2/alloc.c trace_ocfs2_trim_fs(range->start, range->len, range->minlen); start 7694 fs/ocfs2/alloc.c info.tf_start == range->start && start 7707 fs/ocfs2/alloc.c info.tf_start = range->start; start 223 fs/ocfs2/alloc.h unsigned int start, unsigned int end, int trunc); start 259 fs/ocfs2/alloc.h int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, start 285 fs/ocfs2/aops.c loff_t start = (loff_t)page->index << PAGE_SHIFT; start 322 fs/ocfs2/aops.c if (start >= i_size_read(inode)) { start 360 fs/ocfs2/aops.c loff_t start; start 388 fs/ocfs2/aops.c start = (loff_t)last->index << PAGE_SHIFT; start 389 fs/ocfs2/aops.c if (start >= i_size_read(inode)) start 520 fs/ocfs2/aops.c unsigned int *start, start 539 fs/ocfs2/aops.c if (start) start 540 fs/ocfs2/aops.c *start = cluster_start; start 905 fs/ocfs2/aops.c unsigned start, end; start 907 fs/ocfs2/aops.c start = max(from, block_start); start 910 fs/ocfs2/aops.c zero_user_segment(page, start, end); start 1044 fs/ocfs2/aops.c unsigned long start, target_index, end_index, index; start 1059 fs/ocfs2/aops.c start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); start 1068 fs/ocfs2/aops.c if ((start + wc->w_num_pages) > end_index) start 1069 fs/ocfs2/aops.c wc->w_num_pages = end_index - start; start 1072 fs/ocfs2/aops.c start = target_index; start 1077 fs/ocfs2/aops.c index = start + i; start 1967 fs/ocfs2/aops.c unsigned from, to, start = pos & (PAGE_SIZE - 1); start 1996 fs/ocfs2/aops.c ocfs2_zero_new_buffers(wc->w_target_page, start+copied, start 1997 fs/ocfs2/aops.c start+len); start 160 fs/ocfs2/cluster/netdebug.c .start = nst_seq_start, start 374 fs/ocfs2/cluster/netdebug.c .start = sc_seq_start, start 653 fs/ocfs2/dir.c static int ocfs2_read_dx_leaves(struct inode *dir, u64 
start, int num, start 658 fs/ocfs2/dir.c ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0, start 673 fs/ocfs2/dir.c unsigned long start, block, b; start 684 fs/ocfs2/dir.c start = OCFS2_I(dir)->ip_dir_start_lookup; start 685 fs/ocfs2/dir.c if (start >= nblocks) start 686 fs/ocfs2/dir.c start = 0; start 687 fs/ocfs2/dir.c block = start; start 704 fs/ocfs2/dir.c if (b >= nblocks || (num && block == start)) { start 743 fs/ocfs2/dir.c } while (block != start); start 752 fs/ocfs2/dir.c start = 0; start 1189 fs/ocfs2/dir.c char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data; start 1194 fs/ocfs2/dir.c limit = start + size; start 1195 fs/ocfs2/dir.c de_buf = start; start 2155 fs/ocfs2/dir.c char *start, start 2158 fs/ocfs2/dir.c struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start; start 2716 fs/ocfs2/dir.c static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size, start 2732 fs/ocfs2/dir.c limit = start + old_size; start 2733 fs/ocfs2/dir.c de_buf = start; start 606 fs/ocfs2/dlm/dlmdebug.c .start = lockres_seq_start, start 3024 fs/ocfs2/dlmglue.c static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start, start 3032 fs/ocfs2/dlmglue.c list_for_each_entry(iter, &start->l_debug_list, l_debug_list) { start 3230 fs/ocfs2/dlmglue.c .start = ocfs2_dlm_seq_start, start 166 fs/ocfs2/file.c static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, start 187 fs/ocfs2/file.c err = file_write_and_wait_range(file, start, end); start 1430 fs/ocfs2/file.c u64 start, u64 len) start 1434 fs/ocfs2/file.c u64 end = start + len; start 1461 fs/ocfs2/file.c cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; start 1462 fs/ocfs2/file.c clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); start 1515 fs/ocfs2/file.c loff_t start, end; start 1518 fs/ocfs2/file.c start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start); start 1522 fs/ocfs2/file.c if (start < end) { start 1523 fs/ocfs2/file.c unmap_mapping_range(mapping, start, end - start, 0); start 1524 fs/ocfs2/file.c truncate_inode_pages_range(mapping, start, end - 1); start 1529 fs/ocfs2/file.c u64 start, u64 len) start 1533 fs/ocfs2/file.c u64 end = start + len; start 1547 fs/ocfs2/file.c (unsigned long long)start, (unsigned long long)end); start 1554 fs/ocfs2/file.c if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) start 1571 fs/ocfs2/file.c if ((start & (csize - 1)) != 0) { start 1577 fs/ocfs2/file.c (start & ~(osb->s_clustersize - 1)); start 1582 fs/ocfs2/file.c (unsigned long long)start, start 1585 fs/ocfs2/file.c ret = ocfs2_zero_range_for_truncate(inode, handle, start, start 1597 fs/ocfs2/file.c start = end & ~(osb->s_clustersize - 1); start 1600 fs/ocfs2/file.c (unsigned long long)start, (unsigned long long)end); start 1602 fs/ocfs2/file.c ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); start 722 fs/ocfs2/localalloc.c int status, start; start 734 fs/ocfs2/localalloc.c start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted, start 736 fs/ocfs2/localalloc.c if (start == -1) { start 744 fs/ocfs2/localalloc.c *bit_off = le32_to_cpu(la->la_bm_off) + start; start 756 fs/ocfs2/localalloc.c ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start, start 760 fs/ocfs2/localalloc.c ocfs2_set_bit(start++, bitmap); start 777 fs/ocfs2/localalloc.c int status, start; start 791 fs/ocfs2/localalloc.c start = bit_off - le32_to_cpu(la->la_bm_off); start 804 fs/ocfs2/localalloc.c ocfs2_clear_bit(start++, bitmap); 
start 927 fs/ocfs2/localalloc.c unsigned int start, start 932 fs/ocfs2/localalloc.c if (ocfs2_test_bit(start + tmp, bitmap)) { start 934 fs/ocfs2/localalloc.c "%u\n", start, count); start 936 fs/ocfs2/localalloc.c start + tmp); start 956 fs/ocfs2/localalloc.c int bit_off, left, count, start; start 978 fs/ocfs2/localalloc.c start = count = bit_off = 0; start 981 fs/ocfs2/localalloc.c while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) start 983 fs/ocfs2/localalloc.c if ((bit_off < left) && (bit_off == start)) { start 985 fs/ocfs2/localalloc.c start++; start 991 fs/ocfs2/localalloc.c start - count); start 994 fs/ocfs2/localalloc.c count, start - count, start 1010 fs/ocfs2/localalloc.c start = bit_off + 1; start 921 fs/ocfs2/ocfs2.h int start) start 926 fs/ocfs2/ocfs2.h start += fix; start 928 fs/ocfs2/ocfs2.h ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix; start 626 fs/ocfs2/ocfs2_trace.h unsigned int start, unsigned int num), start 627 fs/ocfs2/ocfs2_trace.h TP_ARGS(blkno, index, start, num), start 631 fs/ocfs2/ocfs2_trace.h __field(unsigned int, start) start 637 fs/ocfs2/ocfs2_trace.h __entry->start = start; start 642 fs/ocfs2/ocfs2_trace.h __entry->start, __entry->num) start 648 fs/ocfs2/ocfs2_trace.h unsigned int start, unsigned int num), \ start 649 fs/ocfs2/ocfs2_trace.h TP_ARGS(blkno, index, start, num)) start 1891 fs/ocfs2/ocfs2_trace.h TP_PROTO(unsigned int start, unsigned int end, unsigned int len, start 1893 fs/ocfs2/ocfs2_trace.h TP_ARGS(start, end, len, last_start, last_len), start 1895 fs/ocfs2/ocfs2_trace.h __field(unsigned int, start) start 1902 fs/ocfs2/ocfs2_trace.h __entry->start = start; start 1908 fs/ocfs2/ocfs2_trace.h TP_printk("%u %u %u %u %u", __entry->start, __entry->end, start 1947 fs/ocfs2/ocfs2_trace.h TP_PROTO(unsigned int start, unsigned int end, unsigned int len, start 1949 fs/ocfs2/ocfs2_trace.h TP_ARGS(start, end, len, last_start, last_len), start 1951 fs/ocfs2/ocfs2_trace.h __field(unsigned int, start) start 1958 fs/ocfs2/ocfs2_trace.h __entry->start = start; start 1964 fs/ocfs2/ocfs2_trace.h TP_printk("%u %u %u %u %u", __entry->start, __entry->end, start 2590 fs/ocfs2/refcounttree.c unsigned int start, start 2593 fs/ocfs2/refcounttree.c BUG_ON(start > cpos); start 2595 fs/ocfs2/refcounttree.c return start + ((cpos - start) & ocfs2_cow_contig_mask(sb)); start 120 fs/ocfs2/reservations.c unsigned int start = resv->r_start; start 123 fs/ocfs2/reservations.c while (start <= end) { start 124 fs/ocfs2/reservations.c if (ocfs2_test_bit(start, disk_bitmap)) { start 127 fs/ocfs2/reservations.c "starting at bit %u!\n", i, start); start 131 fs/ocfs2/reservations.c start++; start 415 fs/ocfs2/reservations.c int offset, start, found; start 422 fs/ocfs2/reservations.c start = search_start; start 424 fs/ocfs2/reservations.c start)) != -1) { start 429 fs/ocfs2/reservations.c if (offset == start) { start 433 fs/ocfs2/reservations.c start++; start 437 fs/ocfs2/reservations.c start = offset + 1; start 441 fs/ocfs2/reservations.c best_start = start - found; start 762 fs/ocfs2/reservations.c unsigned int start, unsigned int end) start 767 fs/ocfs2/reservations.c BUG_ON(start != resv->r_start || old_end < end); start 1282 fs/ocfs2/suballoc.c int offset, start, found, status = 0; start 1289 fs/ocfs2/suballoc.c found = start = best_offset = best_size = 0; start 1292 fs/ocfs2/suballoc.c while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) { start 1300 fs/ocfs2/suballoc.c start = offset + 1; start 1301 fs/ocfs2/suballoc.c } else if 
(offset == start) { start 1305 fs/ocfs2/suballoc.c start++; start 1309 fs/ocfs2/suballoc.c start = offset + 1; start 1313 fs/ocfs2/suballoc.c best_offset = start - found; start 4633 fs/ocfs2/xattr.c int count, start, len, name_value_len = 0, name_offset = 0; start 4689 fs/ocfs2/xattr.c start = ocfs2_xattr_find_divide_pos(xh); start 4691 fs/ocfs2/xattr.c if (start == count) { start 4692 fs/ocfs2/xattr.c xe = &xh->xh_entries[start-1]; start 4722 fs/ocfs2/xattr.c for (i = 0; i < start; i++) { start 4737 fs/ocfs2/xattr.c xe = &xh->xh_entries[start]; start 4738 fs/ocfs2/xattr.c len = sizeof(struct ocfs2_xattr_entry) * (count - start); start 4743 fs/ocfs2/xattr.c xe = &xh->xh_entries[count - start]; start 4744 fs/ocfs2/xattr.c len = sizeof(struct ocfs2_xattr_entry) * start; start 4747 fs/ocfs2/xattr.c le16_add_cpu(&xh->xh_count, -start); start 4777 fs/ocfs2/xattr.c if (start == count) start 4781 fs/ocfs2/xattr.c memset(&xh->xh_entries[start], 0, start 4782 fs/ocfs2/xattr.c sizeof(struct ocfs2_xattr_entry) * (count - start)); start 4783 fs/ocfs2/xattr.c xh->xh_count = cpu_to_le16(start); start 75 fs/omfs/file.c u64 start, count; start 76 fs/omfs/file.c start = be64_to_cpu(entry->e_cluster); start 79 fs/omfs/file.c omfs_clear_range(inode->i_sb, start, (int) count); start 139 fs/openpromfs/inode.c .start = property_start, start 523 fs/orangefs/file.c loff_t start, start 533 fs/orangefs/file.c start, end); start 110 fs/orangefs/orangefs-debugfs.c .start = help_start, start 294 fs/overlayfs/file.c static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 307 fs/overlayfs/file.c ret = vfs_fsync_range(real.file, start, end, datasync); start 464 fs/overlayfs/inode.c u64 start, u64 len) start 478 fs/overlayfs/inode.c err = realinode->i_op->fiemap(realinode, fieinfo, start, len); start 823 fs/overlayfs/readdir.c static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, start 864 fs/overlayfs/readdir.c return vfs_fsync_range(realfile, start, end, datasync); start 667 fs/proc/array.c struct task_struct *start, *task; start 672 fs/proc/array.c start = pid_task(proc_pid(inode), PIDTYPE_PID); start 673 fs/proc/array.c if (!start) start 682 fs/proc/array.c if (task && task->real_parent == start && start 684 fs/proc/array.c if (list_is_last(&task->sibling, &start->children)) start 708 fs/proc/array.c list_for_each_entry(task, &start->children, sibling) { start 750 fs/proc/array.c .start = children_seq_start, start 1923 fs/proc/base.c unsigned long *start, unsigned long *end) start 1954 fs/proc/base.c *start = sval; start 2053 fs/proc/base.c unsigned long start; start 2224 fs/proc/base.c p->start = vma->vm_start; start 2236 fs/proc/base.c len = snprintf(buf, sizeof(buf), "%lx-%lx", p->start, p->end); start 2332 fs/proc/base.c .start = timers_start, start 3632 fs/proc/base.c static struct task_struct *next_tid(struct task_struct *start) start 3636 fs/proc/base.c if (pid_alive(start)) { start 3637 fs/proc/base.c pos = next_thread(start); start 3644 fs/proc/base.c put_task_struct(start); start 87 fs/proc/consoles.c .start = c_start, start 48 fs/proc/devices.c .start = devinfo_start, start 31 fs/proc/interrupts.c .start = int_seq_start, start 149 fs/proc/kcore.c unsigned long start, end; start 153 fs/proc/kcore.c start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK; start 160 fs/proc/kcore.c if (start < tmp->addr + tmp->size) start 164 fs/proc/kcore.c if (start < end) { start 168 fs/proc/kcore.c vmm->addr = start; start 169 fs/proc/kcore.c vmm->size = end - start; start 322 
fs/proc/kcore.c unsigned long start; start 467 fs/proc/kcore.c start = kc_offset_to_vaddr(*fpos - data_offset); start 468 fs/proc/kcore.c if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) start 477 fs/proc/kcore.c if (!m || start < m->addr || start >= m->addr + m->size) { start 479 fs/proc/kcore.c if (start >= m->addr && start 480 fs/proc/kcore.c start < m->addr + m->size) start 491 fs/proc/kcore.c } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) { start 497 fs/proc/kcore.c vread(buf, (char *)start, tsz); start 505 fs/proc/kcore.c if (copy_to_user(buffer, (char *)start, tsz)) { start 510 fs/proc/kcore.c if (kern_addr_valid(start)) { start 515 fs/proc/kcore.c if (probe_kernel_read(buf, (void *) start, tsz)) { start 536 fs/proc/kcore.c start += tsz; start 106 fs/proc/nommu.c .start = nommu_region_list_start, start 123 fs/proc/proc_tty.c .start = t_start, start 280 fs/proc/task_mmu.c unsigned long start, unsigned long end, start 285 fs/proc/task_mmu.c seq_put_hex_ll(m, NULL, start, 8); start 307 fs/proc/task_mmu.c unsigned long start, end; start 318 fs/proc/task_mmu.c start = vma->vm_start; start 320 fs/proc/task_mmu.c show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino); start 371 fs/proc/task_mmu.c .start = m_start, start 908 fs/proc/task_mmu.c .start = m_start, start 1107 fs/proc/task_mmu.c static int clear_refs_test_walk(unsigned long start, unsigned long end, start 1284 fs/proc/task_mmu.c static int pagemap_pte_hole(unsigned long start, unsigned long end, start 1288 fs/proc/task_mmu.c unsigned long addr = start; start 1895 fs/proc/task_mmu.c .start = m_start, start 252 fs/proc/task_nommu.c .start = m_start, start 220 fs/proc/vmcore.c static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf) start 230 fs/proc/vmcore.c if (start < offset + dump->size) { start 231 fs/proc/vmcore.c tsz = min(offset + (u64)dump->size - start, (u64)size); start 232 fs/proc/vmcore.c buf = dump->buf + start - offset; start 239 fs/proc/vmcore.c start += tsz; start 256 fs/proc/vmcore.c u64 start, size_t size) start 266 fs/proc/vmcore.c if (start < offset + dump->size) { start 267 fs/proc/vmcore.c tsz = min(offset + (u64)dump->size - start, (u64)size); start 268 fs/proc/vmcore.c buf = dump->buf + start - offset; start 276 fs/proc/vmcore.c start += tsz; start 301 fs/proc/vmcore.c u64 start; start 344 fs/proc/vmcore.c start = *fpos - elfcorebuf_sz; start 345 fs/proc/vmcore.c if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf)) start 380 fs/proc/vmcore.c start = m->paddr + *fpos - m->offset; start 381 fs/proc/vmcore.c tmp = read_from_oldmem(buffer, tsz, &start, start 554 fs/proc/vmcore.c u64 start, end, len, tsz; start 557 fs/proc/vmcore.c start = (u64)vma->vm_pgoff << PAGE_SHIFT; start 558 fs/proc/vmcore.c end = start + size; start 572 fs/proc/vmcore.c if (start < elfcorebuf_sz) { start 575 fs/proc/vmcore.c tsz = min(elfcorebuf_sz - (size_t)start, size); start 576 fs/proc/vmcore.c pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT; start 581 fs/proc/vmcore.c start += tsz; start 588 fs/proc/vmcore.c if (start < elfcorebuf_sz + elfnotes_sz) { start 604 fs/proc/vmcore.c if (start < elfcorebuf_sz + vmcoredd_orig_sz) { start 608 fs/proc/vmcore.c (size_t)start, size); start 609 fs/proc/vmcore.c start_off = start - elfcorebuf_sz; start 615 fs/proc/vmcore.c start += tsz; start 625 fs/proc/vmcore.c tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); start 626 fs/proc/vmcore.c kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; start 632 fs/proc/vmcore.c start += tsz; 
start 640 fs/proc/vmcore.c if (start < m->offset + m->size) { start 644 fs/proc/vmcore.c m->offset + m->size - start, size); start 645 fs/proc/vmcore.c paddr = m->paddr + start - m->offset; start 651 fs/proc/vmcore.c start += tsz; start 1097 fs/proc/vmcore.c u64 paddr, start, end, size; start 1103 fs/proc/vmcore.c start = rounddown(paddr, PAGE_SIZE); start 1105 fs/proc/vmcore.c size = end - start; start 1111 fs/proc/vmcore.c new->paddr = start; start 1116 fs/proc/vmcore.c phdr_ptr->p_offset = vmcore_off + (paddr - start); start 1140 fs/proc/vmcore.c u64 paddr, start, end, size; start 1146 fs/proc/vmcore.c start = rounddown(paddr, PAGE_SIZE); start 1148 fs/proc/vmcore.c size = end - start; start 1154 fs/proc/vmcore.c new->paddr = start; start 1159 fs/proc/vmcore.c phdr_ptr->p_offset = vmcore_off + (paddr - start); start 1372 fs/proc/vmcore.c u64 start, end, size; start 1391 fs/proc/vmcore.c start = rounddown(phdr->p_offset, PAGE_SIZE); start 1394 fs/proc/vmcore.c size = end - start; start 1395 fs/proc/vmcore.c phdr->p_offset = vmcore_off + (phdr->p_offset - start); start 1411 fs/proc/vmcore.c start = rounddown(phdr->p_offset, PAGE_SIZE); start 1414 fs/proc/vmcore.c size = end - start; start 1415 fs/proc/vmcore.c phdr->p_offset = vmcore_off + (phdr->p_offset - start); start 119 fs/pstore/inode.c .start = pstore_ftrace_seq_start, start 688 fs/pstore/ram.c pdata->mem_address = res->start; start 35 fs/pstore/ram_core.c atomic_t start; start 49 fs/pstore/ram_core.c return atomic_read(&prz->buffer->start); start 62 fs/pstore/ram_core.c old = atomic_read(&prz->buffer->start); start 66 fs/pstore/ram_core.c atomic_set(&prz->buffer->start, new); start 123 fs/pstore/ram_core.c unsigned int start, unsigned int count) start 136 fs/pstore/ram_core.c block = buffer->data + (start & ~(ecc_block_size - 1)); start 137 fs/pstore/ram_core.c par = prz->par_buffer + (start / ecc_block_size) * ecc_size; start 145 fs/pstore/ram_core.c } while (block < buffer->data + start + count); start 275 fs/pstore/ram_core.c const void *s, unsigned int start, unsigned int count) start 278 fs/pstore/ram_core.c memcpy_toio(buffer->data + start, s, count); start 279 fs/pstore/ram_core.c persistent_ram_update_ecc(prz, start, count); start 283 fs/pstore/ram_core.c const void __user *s, unsigned int start, unsigned int count) start 286 fs/pstore/ram_core.c int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ? 
start 288 fs/pstore/ram_core.c persistent_ram_update_ecc(prz, start, count); start 296 fs/pstore/ram_core.c size_t start = buffer_start(prz); start 311 fs/pstore/ram_core.c memcpy_fromio(prz->old_log, &buffer->data[start], size - start); start 312 fs/pstore/ram_core.c memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start); start 320 fs/pstore/ram_core.c size_t start; start 329 fs/pstore/ram_core.c start = buffer_start_add(prz, c); start 331 fs/pstore/ram_core.c rem = prz->buffer_size - start; start 333 fs/pstore/ram_core.c persistent_ram_update(prz, s, start, rem); start 336 fs/pstore/ram_core.c start = 0; start 338 fs/pstore/ram_core.c persistent_ram_update(prz, s, start, c); start 349 fs/pstore/ram_core.c size_t start; start 360 fs/pstore/ram_core.c start = buffer_start_add(prz, c); start 362 fs/pstore/ram_core.c rem = prz->buffer_size - start; start 364 fs/pstore/ram_core.c ret = persistent_ram_update_user(prz, s, start, rem); start 367 fs/pstore/ram_core.c start = 0; start 370 fs/pstore/ram_core.c ret = persistent_ram_update_user(prz, s, start, c); start 396 fs/pstore/ram_core.c atomic_set(&prz->buffer->start, 0); start 401 fs/pstore/ram_core.c static void *persistent_ram_vmap(phys_addr_t start, size_t size, start 411 fs/pstore/ram_core.c page_start = start - offset_in_page(start); start 412 fs/pstore/ram_core.c page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); start 438 fs/pstore/ram_core.c return vaddr + offset_in_page(start); start 441 fs/pstore/ram_core.c static void *persistent_ram_iomap(phys_addr_t start, size_t size, start 446 fs/pstore/ram_core.c if (!request_mem_region(start, size, label ?: "ramoops")) { start 449 fs/pstore/ram_core.c (unsigned long long)size, (unsigned long long)start); start 454 fs/pstore/ram_core.c va = ioremap(start, size); start 456 fs/pstore/ram_core.c va = ioremap_wc(start, size); start 466 fs/pstore/ram_core.c static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, start 469 fs/pstore/ram_core.c prz->paddr = start; start 472 fs/pstore/ram_core.c if (pfn_valid(start >> PAGE_SHIFT)) start 473 fs/pstore/ram_core.c prz->vaddr = persistent_ram_vmap(start, size, memtype); start 475 fs/pstore/ram_core.c prz->vaddr = persistent_ram_iomap(start, size, memtype, start 480 fs/pstore/ram_core.c (unsigned long long)size, (unsigned long long)start); start 561 fs/pstore/ram_core.c struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, start 579 fs/pstore/ram_core.c ret = persistent_ram_buffer_map(start, size, prz, memtype); start 23 fs/qnx4/bitmap.c int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1; start 33 fs/qnx4/bitmap.c if ((bh = sb_bread(sb, start + offset)) == NULL) { start 120 fs/qnx6/dir.c unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE; start 127 fs/qnx6/dir.c for ( ; !done && n < npages; n++, start = 0) { start 131 fs/qnx6/dir.c int i = start; start 138 fs/qnx6/dir.c de = ((struct qnx6_dir_entry *)page_address(page)) + start; start 219 fs/qnx6/dir.c unsigned long start, n; start 229 fs/qnx6/dir.c start = ei->i_dir_start_lookup; start 230 fs/qnx6/dir.c if (start >= npages) start 231 fs/qnx6/dir.c start = 0; start 232 fs/qnx6/dir.c n = start; start 264 fs/qnx6/dir.c } while (n != start); start 348 fs/reiserfs/bitmap.c b_blocknr_t * start, b_blocknr_t finish, start 364 fs/reiserfs/bitmap.c get_bit_address(s, *start, &bm, &off); start 397 fs/reiserfs/bitmap.c get_bit_address(s, *start, &bm, &off); start 411 fs/reiserfs/bitmap.c *start = bm * off_max + off; 
start 1152 fs/reiserfs/bitmap.c b_blocknr_t start, start 1160 fs/reiserfs/bitmap.c while (rest > 0 && start <= finish) { start 1161 fs/reiserfs/bitmap.c nr_allocated = scan_bitmap(hint->th, &start, finish, min, start 1170 fs/reiserfs/bitmap.c *new_blocknrs++ = start++; start 1184 fs/reiserfs/bitmap.c REISERFS_I(hint->inode)->i_prealloc_block = start; start 1198 fs/reiserfs/bitmap.c b_blocknr_t start = hint->search_start; start 1237 fs/reiserfs/bitmap.c start = hint->search_start; start 1241 fs/reiserfs/bitmap.c start = hint->beg; start 1245 fs/reiserfs/bitmap.c start = 0; start 1277 fs/reiserfs/bitmap.c start, finish, start 17 fs/reiserfs/dir.c static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, start 31 fs/reiserfs/dir.c static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, start 37 fs/reiserfs/dir.c err = file_write_and_wait_range(filp, start, end); start 150 fs/reiserfs/file.c static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end, start 157 fs/reiserfs/file.c err = file_write_and_wait_range(filp, start, end); start 2197 fs/reiserfs/inode.c unsigned long start = 0; start 2220 fs/reiserfs/inode.c start = (offset / blocksize) * blocksize; start 2222 fs/reiserfs/inode.c error = __block_write_begin(page, start, offset - start, start 2230 fs/reiserfs/inode.c if (pos >= start) { start 2903 fs/reiserfs/inode.c unsigned start; start 2915 fs/reiserfs/inode.c start = pos & (PAGE_SIZE - 1); start 2920 fs/reiserfs/inode.c page_zero_new_buffers(page, start + copied, start + len); start 2924 fs/reiserfs/inode.c reiserfs_commit_page(inode, page, start, start + copied); start 227 fs/reiserfs/item_ops.c static void start_new_sequence(__u32 * start, int *len, __u32 new) start 229 fs/reiserfs/item_ops.c *start = new; start 233 fs/reiserfs/item_ops.c static int sequence_finished(__u32 start, int *len, __u32 new) start 235 fs/reiserfs/item_ops.c if (start == INT_MAX) start 238 fs/reiserfs/item_ops.c if (start == 0 && new == 0) { start 242 fs/reiserfs/item_ops.c if (start != 0 && (start + *len) == new) { start 249 fs/reiserfs/item_ops.c static void print_sequence(__u32 start, int len) start 251 fs/reiserfs/item_ops.c if (start == INT_MAX) start 255 fs/reiserfs/item_ops.c printk(" %d", start); start 257 fs/reiserfs/item_ops.c printk(" %d(%d)", start, len); start 2375 fs/reiserfs/journal.c time64_t start; start 2389 fs/reiserfs/journal.c start = ktime_get_seconds(); start 2550 fs/reiserfs/journal.c replay_count, ktime_get_seconds() - start); start 108 fs/seq_file.c p = m->op->start(m, &m->index); start 224 fs/seq_file.c p = m->op->start(m, &m->index); start 247 fs/seq_file.c p = m->op->start(m, &m->index); start 573 fs/seq_file.c op->start = single_start; start 1222 fs/splice.c size_t start; start 1225 fs/splice.c copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start); start 1231 fs/splice.c for (n = 0; copied; n++, start = 0) { start 1232 fs/splice.c int size = min_t(int, copied, PAGE_SIZE - start); start 1235 fs/splice.c buf.offset = start; start 101 fs/squashfs/dir.c u64 block = squashfs_i(inode)->start + msblk->directory_table; start 44 fs/squashfs/export.c u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); start 50 fs/squashfs/export.c err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); start 237 fs/squashfs/file.c u64 cur_data_block = squashfs_i(inode)->start; start 328 fs/squashfs/file.c u64 start; start 332 fs/squashfs/file.c int res = fill_meta_index(inode, index, &start, &offset, block); start 335 
fs/squashfs/file.c " 0x%x, block 0x%llx\n", res, index, start, offset, start 348 fs/squashfs/file.c blks = read_indexes(inode->i_sb, index - res, &start, &offset); start 357 fs/squashfs/file.c res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset, start 460 fs/squashfs/file.c page->index, squashfs_i(inode)->start); start 162 fs/squashfs/inode.c squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); start 169 fs/squashfs/inode.c offset, squashfs_i(inode)->start, block, offset); start 209 fs/squashfs/inode.c squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block); start 216 fs/squashfs/inode.c offset, squashfs_i(inode)->start, block, offset); start 232 fs/squashfs/inode.c squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); start 239 fs/squashfs/inode.c squashfs_i(inode)->start, start 257 fs/squashfs/inode.c squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); start 266 fs/squashfs/inode.c squashfs_i(inode)->start, start 285 fs/squashfs/inode.c squashfs_i(inode)->start = block; start 135 fs/squashfs/namei.c u64 block = squashfs_i(dir)->start + msblk->directory_table; start 227 fs/squashfs/namei.c squashfs_i(dir)->start + msblk->directory_table, start 14 fs/squashfs/squashfs_fs_i.h u64 start; start 39 fs/squashfs/symlink.c u64 block = squashfs_i(inode)->start; start 57 fs/squashfs/symlink.c squashfs_i(inode)->start, start 74 fs/squashfs/symlink.c squashfs_i(inode)->start, start 32 fs/squashfs/xattr.c u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) start 50 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, &entry, &start, &offset, start 69 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, buffer, &start, start 80 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, NULL, &start, start 88 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, &val, &start, &offset, start 93 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, NULL, &start, &offset, start 110 fs/squashfs/xattr.c u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) start 127 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, &entry, &start, &offset, start 137 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, target, &start, start 140 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, NULL, &start, start 152 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, &val, &start, start 157 fs/squashfs/xattr.c &start, &offset, sizeof(xattr_val)); start 161 fs/squashfs/xattr.c start = SQUASHFS_XATTR_BLK(xattr) + start 166 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, &val, &start, &offset, start 177 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, buffer, &start, start 186 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, &val, &start, &offset, start 190 fs/squashfs/xattr.c err = squashfs_read_metadata(sb, NULL, &start, &offset, start 18 fs/squashfs/xattr.h u64 start, u64 *xattr_table_start, int *xattr_ids) start 21 fs/squashfs/xattr.h *xattr_table_start = start; start 53 fs/squashfs/xattr_id.c __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, start 59 fs/squashfs/xattr_id.c id_table = squashfs_read_table(sb, start, sizeof(*id_table)); start 74 fs/squashfs/xattr_id.c if (*xattr_table_start >= start) start 81 fs/squashfs/xattr_id.c return squashfs_read_table(sb, start + sizeof(*id_table), len); start 189 fs/sync.c int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) start 197 fs/sync.c return file->f_op->fsync(file, start, end, datasync); start 132 fs/sysv/dir.c unsigned long start, n; start 139 fs/sysv/dir.c 
start = SYSV_I(dir)->i_dir_start_lookup; start 140 fs/sysv/dir.c if (start >= npages) start 141 fs/sysv/dir.c start = 0; start 142 fs/sysv/dir.c n = start; start 163 fs/sysv/dir.c } while (n != start); start 1310 fs/ubifs/file.c int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 1325 fs/ubifs/file.c err = file_write_and_wait_range(file, start, end); start 144 fs/ubifs/log.c c->bud_bytes += c->leb_size - bud->start; start 147 fs/ubifs/log.c bud->start, dbg_jhead(bud->jhead), c->bud_bytes); start 225 fs/ubifs/log.c bud->start = offs; start 231 fs/ubifs/log.c ref->offs = cpu_to_le32(bud->start); start 247 fs/ubifs/log.c if (bud->start == 0) { start 319 fs/ubifs/log.c c->cmt_bud_bytes += wbuf->offs - bud->start; start 321 fs/ubifs/log.c bud->lnum, bud->start, dbg_jhead(bud->jhead), start 322 fs/ubifs/log.c wbuf->offs - bud->start, c->cmt_bud_bytes); start 323 fs/ubifs/log.c bud->start = wbuf->offs; start 325 fs/ubifs/log.c c->cmt_bud_bytes += c->leb_size - bud->start; start 327 fs/ubifs/log.c bud->lnum, bud->start, dbg_jhead(bud->jhead), start 328 fs/ubifs/log.c c->leb_size - bud->start, c->cmt_bud_bytes); start 752 fs/ubifs/log.c bud_bytes += c->leb_size - bud->start; start 73 fs/ubifs/misc.h ubifs_tnc_find_child(struct ubifs_znode *znode, int start) start 75 fs/ubifs/misc.h while (start < znode->child_cnt) { start 76 fs/ubifs/misc.h if (znode->zbranch[start].znode) start 77 fs/ubifs/misc.h return znode->zbranch[start].znode; start 78 fs/ubifs/misc.h start += 1; start 492 fs/ubifs/recovery.c int start) start 494 fs/ubifs/recovery.c int lnum = sleb->lnum, endpt = start; start 510 fs/ubifs/recovery.c lnum, start, sleb->endpt); start 522 fs/ubifs/recovery.c lnum, start, sleb->endpt); start 530 fs/ubifs/recovery.c if (start) { start 532 fs/ubifs/recovery.c start, 1); start 625 fs/ubifs/recovery.c int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit; start 771 fs/ubifs/recovery.c err = fix_unclean_leb(c, sleb, start); start 102 fs/ubifs/replay.c if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) { start 554 fs/ubifs/replay.c err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1); start 674 fs/ubifs/replay.c int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start; start 914 fs/ubifs/replay.c bud->start = offs; start 969 fs/ubifs/replay.c if (bud->jhead == jhead && bud->start <= offs) start 711 fs/ubifs/ubifs.h int start; start 1990 fs/ubifs/ubifs.h int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync); start 365 fs/udf/balloc.c uint32_t start, end; start 386 fs/udf/balloc.c start = bloc->logicalBlockNum + offset; start 397 fs/udf/balloc.c (elen >> sb->s_blocksize_bits)) == start)) { start 403 fs/udf/balloc.c start += tmp; start 410 fs/udf/balloc.c start += count; start 425 fs/udf/balloc.c eloc.logicalBlockNum = start; start 462 fs/udf/balloc.c eloc.logicalBlockNum = start; start 54 fs/udf/ialloc.c uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; start 87 fs/udf/ialloc.c start, &err); start 983 fs/udf/inode.c int start, length = 0, currlength = 0, i; start 989 fs/udf/inode.c start = c; start 993 fs/udf/inode.c start = c + 1; start 1000 fs/udf/inode.c start = c; start 1003 fs/udf/inode.c for (i = start + 1; i <= *endnum; i++) { start 1018 fs/udf/inode.c int next = laarr[start].extLocation.logicalBlockNum + start 1019 fs/udf/inode.c (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + start 1023 fs/udf/inode.c laarr[start].extLocation.partitionReferenceNum, start 1028 fs/udf/inode.c 
if (start == (c + 1)) start 1029 fs/udf/inode.c laarr[start].extLength += start 1044 fs/udf/inode.c start = c + 1; start 1047 fs/udf/inode.c for (i = start + 1; numalloc && i < *endnum; i++) { start 1178 fs/udf/inode.c int start = 0, i; start 1191 fs/udf/inode.c start++; start 1195 fs/udf/inode.c for (i = start; i < endnum; i++) { start 810 fs/ufs/balloc.c unsigned start, length, loc; start 818 fs/ufs/balloc.c start = ufs_dtogd(uspi, goal) >> 3; start 820 fs/ufs/balloc.c start = ucpi->c_frotor >> 3; start 822 fs/ufs/balloc.c length = ((uspi->s_fpg + 7) >> 3) - start; start 823 fs/ufs/balloc.c loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length, start 827 fs/ufs/balloc.c length = start + 1; start 836 fs/ufs/balloc.c ucpi->c_cgx, start, length, count, start 840 fs/ufs/balloc.c start = 0; start 842 fs/ufs/balloc.c result = (start + length - loc) << 3; start 875 fs/ufs/balloc.c int i, start, end, forw, back; start 889 fs/ufs/balloc.c start = blkno + 1; start 890 fs/ufs/balloc.c end = start + uspi->s_contigsumsize; start 893 fs/ufs/balloc.c i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start); start 896 fs/ufs/balloc.c forw = i - start; start 901 fs/ufs/balloc.c start = blkno - 1; start 902 fs/ufs/balloc.c end = start - uspi->s_contigsumsize; start 905 fs/ufs/balloc.c i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end); start 908 fs/ufs/balloc.c back = start - i; start 257 fs/ufs/dir.c unsigned long start, n; start 271 fs/ufs/dir.c start = ui->i_dir_start_lookup; start 273 fs/ufs/dir.c if (start >= npages) start 274 fs/ufs/dir.c start = 0; start 275 fs/ufs/dir.c n = start; start 292 fs/ufs/dir.c } while (n != start); start 181 fs/ufs/ialloc.c unsigned cg, bit, i, j, start; start 248 fs/ufs/ialloc.c start = ucpi->c_irotor; start 249 fs/ufs/ialloc.c bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start); start 251 fs/ufs/ialloc.c bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start); start 252 fs/ufs/ialloc.c if (!(bit < start)) { start 259 fs/ufs/ialloc.c UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg); start 442 fs/ufs/util.h unsigned begin, unsigned start, unsigned end) start 446 fs/ufs/util.h size = start - end; start 448 fs/ufs/util.h start += begin; start 449 fs/ufs/util.h base = start >> uspi->s_bpfshift; start 450 fs/ufs/util.h start &= uspi->s_bpfmask; start 453 fs/ufs/util.h size + (uspi->s_bpf - start), uspi->s_bpf) start 454 fs/ufs/util.h - (uspi->s_bpf - start); start 457 fs/ufs/util.h start, start - count); start 458 fs/ufs/util.h if (pos > start - count || !size) start 461 fs/ufs/util.h start = uspi->s_bpf; start 89 fs/userfaultfd.c unsigned long start; start 102 fs/userfaultfd.c unsigned long start; start 112 fs/userfaultfd.c unsigned long start, len; start 117 fs/userfaultfd.c start = range->start; start 119 fs/userfaultfd.c if (len && (start > uwq->msg.arg.pagefault.address || start 120 fs/userfaultfd.c start + len <= uwq->msg.arg.pagefault.address)) start 791 fs/userfaultfd.c unsigned long start, unsigned long end) start 808 fs/userfaultfd.c ewq.msg.arg.remove.start = start; start 817 fs/userfaultfd.c unsigned long start, unsigned long end) start 822 fs/userfaultfd.c if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && start 830 fs/userfaultfd.c unsigned long start, unsigned long end, start 838 fs/userfaultfd.c has_unmap_ctx(ctx, unmaps, start, end)) start 848 fs/userfaultfd.c unmap_ctx->start = start; start 865 fs/userfaultfd.c 
ewq.msg.arg.remove.start = ctx->start; start 1275 fs/userfaultfd.c __u64 *start, __u64 len) start 1279 fs/userfaultfd.c *start = untagged_addr(*start); start 1281 fs/userfaultfd.c if (*start & ~PAGE_MASK) start 1287 fs/userfaultfd.c if (*start < mmap_min_addr) start 1289 fs/userfaultfd.c if (*start >= task_size) start 1291 fs/userfaultfd.c if (len > task_size - *start) start 1313 fs/userfaultfd.c unsigned long start, end, vma_end; start 1341 fs/userfaultfd.c ret = validate_range(mm, &uffdio_register.range.start, start 1346 fs/userfaultfd.c start = uffdio_register.range.start; start 1347 fs/userfaultfd.c end = start + uffdio_register.range.len; start 1356 fs/userfaultfd.c vma = find_vma_prev(mm, start, &prev); start 1372 fs/userfaultfd.c if (start & (vma_hpagesize - 1)) start 1439 fs/userfaultfd.c if (vma->vm_start < start) start 1459 fs/userfaultfd.c if (vma->vm_start > start) start 1460 fs/userfaultfd.c start = vma->vm_start; start 1464 fs/userfaultfd.c prev = vma_merge(mm, prev, start, vma_end, new_flags, start 1472 fs/userfaultfd.c if (vma->vm_start < start) { start 1473 fs/userfaultfd.c ret = split_vma(mm, vma, start, 1); start 1493 fs/userfaultfd.c start = vma->vm_end; start 1523 fs/userfaultfd.c unsigned long start, end, vma_end; start 1530 fs/userfaultfd.c ret = validate_range(mm, &uffdio_unregister.start, start 1535 fs/userfaultfd.c start = uffdio_unregister.start; start 1536 fs/userfaultfd.c end = start + uffdio_unregister.len; start 1545 fs/userfaultfd.c vma = find_vma_prev(mm, start, &prev); start 1561 fs/userfaultfd.c if (start & (vma_hpagesize - 1)) start 1590 fs/userfaultfd.c if (vma->vm_start < start) start 1608 fs/userfaultfd.c if (vma->vm_start > start) start 1609 fs/userfaultfd.c start = vma->vm_start; start 1620 fs/userfaultfd.c range.start = start; start 1621 fs/userfaultfd.c range.len = vma_end - start; start 1626 fs/userfaultfd.c prev = vma_merge(mm, prev, start, vma_end, new_flags, start 1634 fs/userfaultfd.c if (vma->vm_start < start) { start 1635 fs/userfaultfd.c ret = split_vma(mm, vma, start, 1); start 1655 fs/userfaultfd.c start = vma->vm_end; start 1681 fs/userfaultfd.c ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len); start 1685 fs/userfaultfd.c range.start = uffdio_wake.start; start 1749 fs/userfaultfd.c range.start = uffdio_copy.dst; start 1777 fs/userfaultfd.c ret = validate_range(ctx->mm, &uffdio_zeropage.range.start, start 1786 fs/userfaultfd.c ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, start 1801 fs/userfaultfd.c range.start = uffdio_zeropage.range.start; start 80 fs/xfs/libxfs/xfs_ag.c xfs_agblock_t start = XFS_FSB_TO_AGBNO(mp, start 83 fs/xfs/libxfs/xfs_ag.c ASSERT(start >= mp->m_ag_prealloc_blocks); start 84 fs/xfs/libxfs/xfs_ag.c if (start != mp->m_ag_prealloc_blocks) { start 88 fs/xfs/libxfs/xfs_ag.c arec->ar_blockcount = cpu_to_be32(start - start 57 fs/xfs/libxfs/xfs_alloc_btree.c union xfs_btree_ptr *start, start 5181 fs/xfs/libxfs/xfs_bmap.c xfs_fileoff_t start, /* first file offset deleted */ start 5207 fs/xfs/libxfs/xfs_bmap.c trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); start 5246 fs/xfs/libxfs/xfs_bmap.c end = start + len; start 5273 fs/xfs/libxfs/xfs_bmap.c while (end != (xfs_fileoff_t)-1 && end >= start && start 5290 fs/xfs/libxfs/xfs_bmap.c if (end < start) start 5309 fs/xfs/libxfs/xfs_bmap.c if (got.br_startoff < start) { start 5310 fs/xfs/libxfs/xfs_bmap.c del.br_startoff = start; start 5311 fs/xfs/libxfs/xfs_bmap.c del.br_blockcount -= start - got.br_startoff; start 5313 fs/xfs/libxfs/xfs_bmap.c 
del.br_startblock += start - got.br_startoff; start 5389 fs/xfs/libxfs/xfs_bmap.c } else if (del.br_startoff == start && start 5419 fs/xfs/libxfs/xfs_bmap.c if (prev.br_startoff < start) { start 5420 fs/xfs/libxfs/xfs_bmap.c mod = start - prev.br_startoff; start 5423 fs/xfs/libxfs/xfs_bmap.c prev.br_startoff = start; start 5464 fs/xfs/libxfs/xfs_bmap.c if (end != (xfs_fileoff_t)-1 && end >= start) { start 5474 fs/xfs/libxfs/xfs_bmap.c if (done || end == (xfs_fileoff_t)-1 || end < start) start 5477 fs/xfs/libxfs/xfs_bmap.c *rlen = end - start + 1; start 197 fs/xfs/libxfs/xfs_bmap_btree.c union xfs_btree_ptr *start, start 212 fs/xfs/libxfs/xfs_bmap_btree.c args.fsbno = be64_to_cpu(start->l); start 73 fs/xfs/libxfs/xfs_ialloc_btree.c union xfs_btree_ptr *start, start 80 fs/xfs/libxfs/xfs_ialloc_btree.c xfs_agblock_t sbno = be32_to_cpu(start->s); start 111 fs/xfs/libxfs/xfs_ialloc_btree.c union xfs_btree_ptr *start, start 115 fs/xfs/libxfs/xfs_ialloc_btree.c return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE); start 121 fs/xfs/libxfs/xfs_ialloc_btree.c union xfs_btree_ptr *start, start 126 fs/xfs/libxfs/xfs_ialloc_btree.c return xfs_inobt_alloc_block(cur, start, new, stat); start 127 fs/xfs/libxfs/xfs_ialloc_btree.c return __xfs_inobt_alloc_block(cur, start, new, stat, start 361 fs/xfs/libxfs/xfs_iext_tree.c int start) start 365 fs/xfs/libxfs/xfs_iext_tree.c for (i = start; i < KEYS_PER_NODE; i++) { start 377 fs/xfs/libxfs/xfs_iext_tree.c int start) start 381 fs/xfs/libxfs/xfs_iext_tree.c for (i = start; i < xfs_iext_max_recs(ifp); i++) { start 56 fs/xfs/libxfs/xfs_refcount_btree.c union xfs_btree_ptr *start, start 82 fs/xfs/libxfs/xfs_rmap_btree.c union xfs_btree_ptr *start, start 97 fs/xfs/libxfs/xfs_rtbitmap.c xfs_rtblock_t start, /* starting block to look at */ start 118 fs/xfs/libxfs/xfs_rtbitmap.c block = XFS_BITTOBLOCK(mp, start); start 127 fs/xfs/libxfs/xfs_rtbitmap.c word = XFS_BITTOWORD(mp, start); start 129 fs/xfs/libxfs/xfs_rtbitmap.c bit = (int)(start & (XFS_NBWORD - 1)); start 130 fs/xfs/libxfs/xfs_rtbitmap.c len = start - limit + 1; start 158 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start - i + 1; start 204 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start - i + 1; start 251 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start - i + 1; start 260 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start - i + 1; start 272 fs/xfs/libxfs/xfs_rtbitmap.c xfs_rtblock_t start, /* starting block to look at */ start 293 fs/xfs/libxfs/xfs_rtbitmap.c block = XFS_BITTOBLOCK(mp, start); start 302 fs/xfs/libxfs/xfs_rtbitmap.c word = XFS_BITTOWORD(mp, start); start 304 fs/xfs/libxfs/xfs_rtbitmap.c bit = (int)(start & (XFS_NBWORD - 1)); start 305 fs/xfs/libxfs/xfs_rtbitmap.c len = limit - start + 1; start 332 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start + i - 1; start 377 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start + i - 1; start 421 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start + i - 1; start 430 fs/xfs/libxfs/xfs_rtbitmap.c *rtblock = start + i - 1; start 535 fs/xfs/libxfs/xfs_rtbitmap.c xfs_rtblock_t start, /* starting block to modify */ start 554 fs/xfs/libxfs/xfs_rtbitmap.c block = XFS_BITTOBLOCK(mp, start); start 566 fs/xfs/libxfs/xfs_rtbitmap.c word = XFS_BITTOWORD(mp, start); start 568 fs/xfs/libxfs/xfs_rtbitmap.c bit = (int)(start & (XFS_NBWORD - 1)); start 691 fs/xfs/libxfs/xfs_rtbitmap.c xfs_rtblock_t start, /* starting block to free */ start 701 fs/xfs/libxfs/xfs_rtbitmap.c end = start + len - 1; start 705 fs/xfs/libxfs/xfs_rtbitmap.c error = xfs_rtmodify_range(mp, tp, start, len, 
1); start 714 fs/xfs/libxfs/xfs_rtbitmap.c error = xfs_rtfind_back(mp, tp, start, 0, &preblock); start 729 fs/xfs/libxfs/xfs_rtbitmap.c if (preblock < start) { start 731 fs/xfs/libxfs/xfs_rtbitmap.c XFS_RTBLOCKLOG(start - preblock), start 767 fs/xfs/libxfs/xfs_rtbitmap.c xfs_rtblock_t start, /* starting block number of extent */ start 788 fs/xfs/libxfs/xfs_rtbitmap.c block = XFS_BITTOBLOCK(mp, start); start 800 fs/xfs/libxfs/xfs_rtbitmap.c word = XFS_BITTOWORD(mp, start); start 802 fs/xfs/libxfs/xfs_rtbitmap.c bit = (int)(start & (XFS_NBWORD - 1)); start 829 fs/xfs/libxfs/xfs_rtbitmap.c *new = start + i; start 875 fs/xfs/libxfs/xfs_rtbitmap.c *new = start + i; start 920 fs/xfs/libxfs/xfs_rtbitmap.c *new = start + i; start 930 fs/xfs/libxfs/xfs_rtbitmap.c *new = start + i; start 1086 fs/xfs/libxfs/xfs_rtbitmap.c xfs_rtblock_t start, start 1094 fs/xfs/libxfs/xfs_rtbitmap.c error = xfs_rtcheck_range(mp, tp, start, len, 1, &end, &matches); start 607 fs/xfs/scrub/agheader_repair.c agbno = XFS_FSB_TO_AGBNO(mp, br->start); start 620 fs/xfs/scrub/agheader_repair.c br->start++; start 189 fs/xfs/scrub/attr.c unsigned int start, start 195 fs/xfs/scrub/attr.c if (start >= mapsize) start 197 fs/xfs/scrub/attr.c if (start + len > mapsize) { start 198 fs/xfs/scrub/attr.c len = mapsize - start; start 202 fs/xfs/scrub/attr.c if (find_next_bit(map, mapsize, start) < start + len) start 204 fs/xfs/scrub/attr.c bitmap_set(map, start, len); start 23 fs/xfs/scrub/bitmap.c uint64_t start, start 33 fs/xfs/scrub/bitmap.c bmr->start = start; start 75 fs/xfs/scrub/bitmap.c if (ap->start > bp->start) start 77 fs/xfs/scrub/bitmap.c if (ap->start < bp->start) start 137 fs/xfs/scrub/bitmap.c while (sub_br->start + sub_br->len <= br->start) { start 142 fs/xfs/scrub/bitmap.c if (sub_br->start >= br->start + br->len) { start 148 fs/xfs/scrub/bitmap.c sub_start = sub_br->start; start 150 fs/xfs/scrub/bitmap.c if (sub_br->start < br->start) { start 151 fs/xfs/scrub/bitmap.c sub_len -= br->start - sub_br->start; start 152 fs/xfs/scrub/bitmap.c sub_start = br->start; start 158 fs/xfs/scrub/bitmap.c if (sub_start == br->start) start 160 fs/xfs/scrub/bitmap.c if (sub_start + sub_len == br->start + br->len) start 165 fs/xfs/scrub/bitmap.c br->start += sub_len; start 191 fs/xfs/scrub/bitmap.c new_br->start = sub_start + sub_len; start 192 fs/xfs/scrub/bitmap.c new_br->len = br->start + br->len - new_br->start; start 194 fs/xfs/scrub/bitmap.c br->len = sub_start - br->start; start 11 fs/xfs/scrub/bitmap.h uint64_t start; start 27 fs/xfs/scrub/bitmap.h for ((b) = bex->start; (b) < bex->start + bex->len; (b)++) start 29 fs/xfs/scrub/bitmap.h int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len); start 91 fs/xfs/xfs_aops.c u64 start = bio->bi_iter.bi_sector; start 115 fs/xfs/xfs_aops.c "writeback error on sector %llu", start); start 1050 fs/xfs/xfs_bmap_util.c xfs_off_t rounding, start, end; start 1057 fs/xfs/xfs_bmap_util.c start = round_down(offset, rounding); start 1060 fs/xfs/xfs_bmap_util.c error = filemap_write_and_wait_range(inode->i_mapping, start, end); start 1063 fs/xfs/xfs_bmap_util.c truncate_pagecache_range(inode, start, end); start 346 fs/xfs/xfs_buf.c xfs_off_t start, end; start 389 fs/xfs/xfs_buf.c start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; start 392 fs/xfs/xfs_buf.c page_count = end - start; start 880 fs/xfs/xfs_buf_item.c uint start; start 887 fs/xfs/xfs_buf_item.c start = 0; start 889 fs/xfs/xfs_buf_item.c if (start > last) start 891 fs/xfs/xfs_buf_item.c end = start + 
BBTOB(bp->b_maps[i].bm_len) - 1; start 895 fs/xfs/xfs_buf_item.c start += BBTOB(bp->b_maps[i].bm_len); start 905 fs/xfs/xfs_buf_item.c if (first < start) start 906 fs/xfs/xfs_buf_item.c first = start; start 909 fs/xfs/xfs_buf_item.c xfs_buf_item_log_segment(first - start, end - start, start 912 fs/xfs/xfs_buf_item.c start += BBTOB(bp->b_maps[i].bm_len); start 25 fs/xfs/xfs_discard.c xfs_daddr_t start, start 97 fs/xfs/xfs_discard.c if (dbno + dlen < start || dbno > end) { start 153 fs/xfs/xfs_discard.c xfs_daddr_t start, end, minlen; start 182 fs/xfs/xfs_discard.c if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || start 187 fs/xfs/xfs_discard.c start = BTOBB(range.start); start 188 fs/xfs/xfs_discard.c end = start + BTOBBT(range.len) - 1; start 193 fs/xfs/xfs_discard.c start_agno = xfs_daddr_to_agno(mp, start); start 197 fs/xfs/xfs_discard.c error = xfs_trim_extents(mp, agno, start, end, minlen, start 614 fs/xfs/xfs_dquot.c xfs_fsblock_t start; start 628 fs/xfs/xfs_dquot.c start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk; start 637 fs/xfs/xfs_dquot.c if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) { start 639 fs/xfs/xfs_dquot.c if (got.br_startoff < start) start 640 fs/xfs/xfs_dquot.c got.br_startoff = start; start 345 fs/xfs/xfs_dquot_item.c struct xfs_qoff_logitem *start, start 352 fs/xfs/xfs_dquot_item.c xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? start 355 fs/xfs/xfs_dquot_item.c qf->qql_start_lip = start; start 78 fs/xfs/xfs_file.c loff_t start, start 101 fs/xfs/xfs_file.c loff_t start, start 114 fs/xfs/xfs_file.c error = file_write_and_wait_range(file, start, end); start 946 fs/xfs/xfs_file.c loff_t start, start 962 fs/xfs/xfs_file.c ret = generic_fadvise(file, start, end, advice); start 1064 fs/xfs/xfs_ioctl.c unsigned int start) start 1066 fs/xfs/xfs_ioctl.c unsigned int xflags = start; start 1105 fs/xfs/xfs_iops.c u64 start, start 1113 fs/xfs/xfs_iops.c error = iomap_fiemap(inode, fieinfo, start, length, start 1116 fs/xfs/xfs_iops.c error = iomap_fiemap(inode, fieinfo, start, length, start 250 fs/xfs/xfs_pnfs.c u64 start, length, end; start 252 fs/xfs/xfs_pnfs.c start = maps[i].offset; start 253 fs/xfs/xfs_pnfs.c if (start > size) start 256 fs/xfs/xfs_pnfs.c end = start + maps[i].length; start 260 fs/xfs/xfs_pnfs.c length = end - start; start 268 fs/xfs/xfs_pnfs.c start >> PAGE_SHIFT, start 272 fs/xfs/xfs_pnfs.c error = xfs_iomap_write_unwritten(ip, start, length, false); start 145 fs/xfs/xfs_rtalloc.c xfs_rtblock_t start, /* start block to allocate */ start 155 fs/xfs/xfs_rtalloc.c end = start + len - 1; start 161 fs/xfs/xfs_rtalloc.c error = xfs_rtfind_back(mp, tp, start, 0, &preblock); start 187 fs/xfs/xfs_rtalloc.c if (preblock < start) { start 189 fs/xfs/xfs_rtalloc.c XFS_RTBLOCKLOG(start - preblock), start 210 fs/xfs/xfs_rtalloc.c error = xfs_rtmodify_range(mp, tp, start, len, 0); start 106 fs/xfs/xfs_rtalloc.h xfs_rtblock_t start, xfs_extlen_t len, int val, start 109 fs/xfs/xfs_rtalloc.h xfs_rtblock_t start, xfs_rtblock_t limit, start 112 fs/xfs/xfs_rtalloc.h xfs_rtblock_t start, xfs_rtblock_t limit, start 115 fs/xfs/xfs_rtalloc.h xfs_rtblock_t start, xfs_extlen_t len, int val); start 124 fs/xfs/xfs_rtalloc.h xfs_rtblock_t start, xfs_extlen_t len, start 136 fs/xfs/xfs_rtalloc.h xfs_rtblock_t start, xfs_extlen_t len, start 1346 fs/xfs/xfs_trace.h TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), start 1347 fs/xfs/xfs_trace.h TP_ARGS(ip, start, finish), start 1352 fs/xfs/xfs_trace.h __field(xfs_off_t,
start) start 1359 fs/xfs/xfs_trace.h __entry->start = start; start 1366 fs/xfs/xfs_trace.h __entry->start, start 531 include/acpi/acpi_bus.h void acpi_bus_trim(struct acpi_device *start); start 19 include/asm-generic/4level-fixup.h #define pud_offset(pgd, start) (pgd) start 21 include/asm-generic/5level-fixup.h #define p4d_offset(pgd, start) (pgd) start 34 include/asm-generic/cacheflush.h unsigned long start, start 67 include/asm-generic/cacheflush.h static inline void flush_icache_range(unsigned long start, unsigned long end) start 88 include/asm-generic/cacheflush.h static inline void flush_cache_vmap(unsigned long start, unsigned long end) start 94 include/asm-generic/cacheflush.h static inline void flush_cache_vunmap(unsigned long start, unsigned long end) start 21 include/asm-generic/mm_hooks.h unsigned long start, unsigned long end) start 242 include/asm-generic/tlb.h unsigned long start; start 289 include/asm-generic/tlb.h struct mm_struct *mm, unsigned long start, unsigned long end); start 292 include/asm-generic/tlb.h unsigned long start, unsigned long end, bool force); start 298 include/asm-generic/tlb.h tlb->start = min(tlb->start, address); start 305 include/asm-generic/tlb.h tlb->start = tlb->end = ~0; start 307 include/asm-generic/tlb.h tlb->start = TASK_SIZE; start 372 include/asm-generic/tlb.h flush_tlb_range(&vma, tlb->start, tlb->end); start 409 include/asm-generic/tlb.h mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); start 109 include/crypto/scatterwalk.h unsigned int start, unsigned int nbytes, int out); start 32 include/drm/drm_agpsupport.h int drm_bind_agp(struct agp_memory * handle, unsigned int start); start 68 include/drm/drm_agpsupport.h static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start) start 269 include/drm/drm_mipi_dsi.h int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, start 271 include/drm/drm_mipi_dsi.h int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, start 159 include/drm/drm_mm.h u64 start; start 198 include/drm/drm_mm.h u64 *start, u64 *end); start 296 include/drm/drm_mm.h return hole_node->start + hole_node->size; start 318 include/drm/drm_mm.h return list_next_entry(hole_node, node_list)->start; start 408 include/drm/drm_mm.h u64 start, start 463 include/drm/drm_mm.h void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); start 480 include/drm/drm_mm.h __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); start 501 include/drm/drm_mm.h node__->start < (end__); \ start 507 include/drm/drm_mm.h u64 start, u64 end, start 124 include/drm/drm_print.h ssize_t start; start 69 include/drm/drm_vma_manager.h unsigned long start, start 96 include/drm/drm_vma_manager.h unsigned long start, start 101 include/drm/drm_vma_manager.h node = drm_vma_offset_lookup_locked(mgr, start, pages); start 102 include/drm/drm_vma_manager.h return (node && node->vm_node.start == start) ? 
node : NULL; start 170 include/drm/drm_vma_manager.h return node->vm_node.start; start 204 include/drm/drm_vma_manager.h return ((__u64)node->vm_node.start) << PAGE_SHIFT; start 97 include/drm/ttm/ttm_bo_api.h unsigned long start; start 25 include/keys/rxrpc-type.h u32 start; /* time at which ticket starts */ start 457 include/linux/acpi.h int acpi_check_region(resource_size_t start, resource_size_t n, start 607 include/linux/acpi.h extern int acpi_nvs_register(__u64 start, __u64 size); start 777 include/linux/acpi.h static inline int acpi_check_region(resource_size_t start, resource_size_t n, start 790 include/linux/acpi.h static inline int acpi_nvs_register(__u64 start, __u64 size) start 1114 include/linux/acpi.h int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); start 53 include/linux/alarmtimer.h void alarm_start(struct alarm *alarm, ktime_t start); start 54 include/linux/alarmtimer.h void alarm_start_relative(struct alarm *alarm, ktime_t start); start 208 include/linux/atmdev.h int (*start)(struct atm_dev *dev); start 153 include/linux/bio.h #define __bio_for_each_segment(bvl, bio, iter, start) \ start 154 include/linux/bio.h for (iter = (start); \ start 162 include/linux/bio.h #define __bio_for_each_bvec(bvl, bio, iter, start) \ start 163 include/linux/bio.h for (iter = (start); \ start 146 include/linux/bitmap.h extern void __bitmap_set(unsigned long *map, unsigned int start, int len); start 147 include/linux/bitmap.h extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); start 151 include/linux/bitmap.h unsigned long start, start 171 include/linux/bitmap.h unsigned long start, start 175 include/linux/bitmap.h return bitmap_find_next_zero_area_off(map, size, start, nr, start 208 include/linux/bitmap.h #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) start 389 include/linux/bitmap.h static __always_inline void bitmap_set(unsigned long *map, unsigned int start, start 393 include/linux/bitmap.h __set_bit(start, map); start 394 include/linux/bitmap.h else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && start 395 include/linux/bitmap.h IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && start 398 include/linux/bitmap.h memset((char *)map + start / 8, 0xff, nbits / 8); start 400 include/linux/bitmap.h __bitmap_set(map, start, nbits); start 403 include/linux/bitmap.h static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, start 407 include/linux/bitmap.h __clear_bit(start, map); start 408 include/linux/bitmap.h else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && start 409 include/linux/bitmap.h IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && start 412 include/linux/bitmap.h memset((char *)map + start / 8, 0, nbits / 8); start 414 include/linux/bitmap.h __bitmap_clear(map, start, nbits); start 83 include/linux/blktrace_api.h extern int blk_trace_startstop(struct request_queue *q, int start); start 95 include/linux/blktrace_api.h # define blk_trace_startstop(q, start) (-ENOTTY) start 342 include/linux/bpf_verifier.h u32 start; /* insn idx of function entry point */ start 20 include/linux/bpfilter.h int (*start)(void); start 113 include/linux/bvec.h #define for_each_bvec(bvl, bio_vec, iter, start) \ start 114 include/linux/bvec.h for (iter = (start); \ start 198 include/linux/ceph/ceph_fs.h __le32 start; start 213 include/linux/ceph/ceph_fs.h __le64 start; start 435 include/linux/ceph/ceph_fs.h __le64 start; /* initial location to lock */ start 551 include/linux/ceph/ceph_fs.h __le64 start;/* file 
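
The include/linux/bitmap.h entries above cover the range helpers bitmap_set() and bitmap_clear(), which act on the bit range [start, start + nbits). A kernel-context sketch (illustrative only; the function name is hypothetical) of how a start/len pair is used:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void bitmap_start_demo(void)
{
	DECLARE_BITMAP(map, 128);

	bitmap_zero(map, 128);
	bitmap_set(map, 10, 20);	/* set bits 10..29 */
	bitmap_clear(map, 15, 5);	/* clear bits 15..19 again */

	/* find_next_bit() resumes the scan at a caller-supplied start bit */
	pr_info("first set bit: %lu\n", find_next_bit(map, 128, 0));
}
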
offset to start lock at */ start 790 include/linux/compat.h asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, start 550 include/linux/cpufreq.h int (*start)(struct cpufreq_policy *policy); start 179 include/linux/cpumask.h int start, bool wrap) start 201 include/linux/cpumask.h #define for_each_cpu_wrap(cpu, mask, start) \ start 202 include/linux/cpumask.h for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) start 273 include/linux/cpumask.h extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); start 285 include/linux/cpumask.h #define for_each_cpu_wrap(cpu, mask, start) \ start 286 include/linux/cpumask.h for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ start 288 include/linux/cpumask.h (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) start 122 include/linux/dax.h struct block_device *bdev, int blocksize, sector_t start, start 125 include/linux/dax.h struct block_device *bdev, int blocksize, sector_t start, start 128 include/linux/dax.h return __generic_fsdax_supported(dax_dev, bdev, blocksize, start, start 157 include/linux/dax.h struct block_device *bdev, int blocksize, sector_t start, start 207 include/linux/dax.h int blocksize, sector_t start, sector_t len); start 112 include/linux/device-mapper.h sector_t start, sector_t len, start 425 include/linux/device-mapper.h void dm_remap_zone_report(struct dm_target *ti, sector_t start, start 459 include/linux/device-mapper.h sector_t start, sector_t len, char *params); start 162 include/linux/device.h struct device *start, start 174 include/linux/device.h int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, start 176 include/linux/device.h struct device *bus_find_device(struct bus_type *bus, struct device *start, start 187 include/linux/device.h struct device *start, start 190 include/linux/device.h return bus_find_device(bus, start, name, device_match_name); start 265 include/linux/device.h int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, start 426 include/linux/device.h struct device *start, start 431 include/linux/device.h struct device *start, const void *data, start 485 include/linux/device.h struct device *start) start 487 include/linux/device.h return driver_find_device(drv, start, NULL, device_match_any); start 628 include/linux/device.h struct device *start, start 633 include/linux/device.h extern int class_for_each_device(struct class *class, struct device *start, start 637 include/linux/device.h struct device *start, const void *data, start 26 include/linux/dim.h #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \ start 237 include/linux/dim.h void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, start 256 include/linux/dio.h #define dio_resource_start(d) ((d)->resource.start) start 39 include/linux/dm-bufio.h void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); start 103 include/linux/dm-bufio.h unsigned start, unsigned end); start 636 include/linux/dma-mapping.h bool dma_in_atomic_pool(void *start, size_t size); start 638 include/linux/dma-mapping.h bool dma_free_from_pool(void *start, size_t size); start 111 include/linux/dmar.h extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, start 113 include/linux/dmar.h extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); start 116 include/linux/dmar.h void *start, void*end, u16 segment, start 176 include/linux/efi.h typedef int (*efi_freemem_callback_t) (u64 
start, u64 end, void *arg); start 1174 include/linux/efi.h static inline int efi_range_is_wc(unsigned long start, unsigned long len) start 1179 include/linux/efi.h unsigned long paddr = __pa(start + i); start 131 include/linux/enclosure.h struct enclosure_device *start); start 15 include/linux/extable.h void sort_extable(struct exception_table_entry *start, start 94 include/linux/fb.h __u32 start; /* First entry */ start 667 include/linux/fb.h extern int fb_deferred_io_fsync(struct file *file, loff_t start, start 119 include/linux/fdtable.h unsigned start, unsigned end, unsigned flags); start 561 include/linux/filter.h u64 start = sched_clock(); \ start 566 include/linux/filter.h stats->nsecs += sched_clock() - start; \ start 343 include/linux/firewire.h u64 start; start 17 include/linux/firmware-map.h int firmware_map_add_early(u64 start, u64 end, const char *type); start 18 include/linux/firmware-map.h int firmware_map_add_hotplug(u64 start, u64 end, const char *type); start 19 include/linux/firmware-map.h int firmware_map_remove(u64 start, u64 end, const char *type); start 23 include/linux/firmware-map.h static inline int firmware_map_add_early(u64 start, u64 end, const char *type) start 28 include/linux/firmware-map.h static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type) start 33 include/linux/firmware-map.h static inline int firmware_map_remove(u64 start, u64 end, const char *type) start 35 include/linux/fpga/fpga-region.h struct device *start, const void *data, start 917 include/linux/fs.h pgoff_t start; /* where readahead started */ start 932 include/linux/fs.h return (index >= ra->start && start 933 include/linux/fs.h index < ra->start + ra->size); start 1884 include/linux/fs.h int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, start 2397 include/linux/fs.h loff_t start, loff_t end, unsigned char type) start 2528 include/linux/fs.h extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, start 2722 include/linux/fs.h pgoff_t start, pgoff_t end); start 2732 include/linux/fs.h pgoff_t start, pgoff_t end); start 2753 include/linux/fs.h loff_t start, loff_t end, int sync_mode); start 2755 include/linux/fs.h loff_t start, loff_t end); start 2763 include/linux/fs.h loff_t start, loff_t end); start 2846 include/linux/fs.h extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, start 3120 include/linux/fs.h extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, start 3275 include/linux/fs.h loff_t start, loff_t len, start 3278 include/linux/fs.h struct fiemap_extent_info *fieinfo, u64 start, start 53 include/linux/fsl/bestcomm/bestcomm_priv.h u32 start; start 279 include/linux/fsl/bestcomm/bestcomm_priv.h return bcom_sram_pa2va(bcom_eng->tdt[task].start); start 285 include/linux/fsl/bestcomm/bestcomm_priv.h return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; start 174 include/linux/ftrace.h void ftrace_free_mem(struct module *mod, void *start, void *end); start 177 include/linux/ftrace.h static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } start 239 include/linux/ftrace.h static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } start 315 include/linux/ftrace.h extern int ftrace_text_reserved(const void *start, const void *end); start 434 include/linux/ftrace.h unsigned long ftrace_location_range(unsigned long start, unsigned long end); start 596 include/linux/ftrace.h static inline int ftrace_text_reserved(const 
void *start, const void *end) start 50 include/linux/genalloc.h unsigned long start, start 184 include/linux/genalloc.h unsigned long start, unsigned int nr, void *data, start 188 include/linux/genalloc.h unsigned long size, unsigned long start, unsigned int nr, start 192 include/linux/genalloc.h unsigned long size, unsigned long start, unsigned int nr, start 197 include/linux/genalloc.h unsigned long size, unsigned long start, unsigned int nr, start 201 include/linux/genalloc.h unsigned long start, unsigned int nr, void *data, start 209 include/linux/genalloc.h bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, start 628 include/linux/genhd.h int partno, sector_t start, start 613 include/linux/gfp.h extern int alloc_contig_range(unsigned long start, unsigned long end, start 22 include/linux/hdlc.h void (*start)(struct net_device *dev); /* if open & DCD */ start 797 include/linux/hid.h int (*start)(struct hid_device *hdev); start 892 include/linux/hid.h int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); start 237 include/linux/highmem.h unsigned start, unsigned end) start 239 include/linux/highmem.h zero_user_segments(page, start, end, 0, 0); start 243 include/linux/highmem.h unsigned start, unsigned size) start 245 include/linux/highmem.h zero_user_segments(page, start, start + size, 0, 0); start 164 include/linux/hmm.h unsigned long start; start 202 include/linux/huge_mm.h unsigned long start, start 351 include/linux/huge_mm.h unsigned long start, start 81 include/linux/hugetlb.h unsigned long start, unsigned long end, start 84 include/linux/hugetlb.h unsigned long start, unsigned long end, start 100 include/linux/hugetlb.h long hugetlb_unreserve_pages(struct inode *inode, long start, long end, start 124 include/linux/hugetlb.h unsigned long *start, unsigned long *end); start 163 include/linux/hugetlb.h unsigned long *start, unsigned long *end) start 204 include/linux/hugetlb.h struct vm_area_struct *vma, unsigned long start, start 211 include/linux/hugetlb.h struct vm_area_struct *vma, unsigned long start, start 1232 include/linux/hyperv.h void vmbus_free_mmio(resource_size_t start, resource_size_t size); start 114 include/linux/idr.h int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t); start 117 include/linux/idr.h int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t); start 314 include/linux/idr.h #define ida_simple_get(ida, start, end, gfp) \ start 315 include/linux/idr.h ida_alloc_range(ida, start, (end) - 1, gfp) start 14 include/linux/if_tunnel.h #define for_each_ip_tunnel_rcu(pos, start) \ start 15 include/linux/if_tunnel.h for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next)) start 313 include/linux/input.h void (*start)(struct input_handle *handle); start 9 include/linux/interval_tree.h unsigned long start; /* Start of interval */ start 24 include/linux/interval_tree.h unsigned long start, unsigned long last); start 28 include/linux/interval_tree.h unsigned long start, unsigned long last); start 42 include/linux/interval_tree_generic.h ITTYPE start = ITSTART(node), last = ITLAST(node); \ start 51 include/linux/interval_tree_generic.h if (start < ITSTART(parent)) \ start 81 include/linux/interval_tree_generic.h ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ start 91 include/linux/interval_tree_generic.h if (start <= left->ITSUBTREE) { \ start 105 include/linux/interval_tree_generic.h if (start <= ITLAST(node)) /* Cond2 */ \ start 110 
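
The include/linux/idr.h entries above show idr_alloc() and idr_alloc_cyclic() taking a [start, end) window for the ID they hand back. A kernel-context sketch (illustrative; demo_idr and demo_track() are hypothetical names):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(demo_idr);

static int demo_track(void *object)
{
	/* lowest free ID >= 1; end == 0 means no upper limit */
	return idr_alloc(&demo_idr, object, 1, 0, GFP_KERNEL);
}
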
include/linux/interval_tree_generic.h if (start <= node->ITSUBTREE) \ start 120 include/linux/interval_tree_generic.h ITTYPE start, ITTYPE last) \ start 141 include/linux/interval_tree_generic.h if (node->ITSUBTREE < start) \ start 148 include/linux/interval_tree_generic.h return ITPREFIX ## _subtree_search(node, start, last); \ start 152 include/linux/interval_tree_generic.h ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ start 166 include/linux/interval_tree_generic.h if (start <= right->ITSUBTREE) \ start 168 include/linux/interval_tree_generic.h start, last); \ start 184 include/linux/interval_tree_generic.h else if (start <= ITLAST(node)) /* Cond2 */ \ start 178 include/linux/iomap.h loff_t start, loff_t len, const struct iomap_ops *ops); start 28 include/linux/iommu-helper.h unsigned long start, unsigned int nr, start 159 include/linux/iommu.h phys_addr_t start; start 206 include/linux/iommu.h unsigned long start; start 399 include/linux/iommu.h .start = ULONG_MAX, start 443 include/linux/iommu.h iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, start 518 include/linux/iommu.h unsigned long start = iova, end = start + size; start 526 include/linux/iommu.h end < gather->start || start > gather->end) { start 535 include/linux/iommu.h if (gather->start > start) start 536 include/linux/iommu.h gather->start = start; start 21 include/linux/ioport.h resource_size_t start; start 150 include/linux/ioport.h .start = (_start), \ start 186 include/linux/ioport.h resource_size_t start, resource_size_t end, start 201 include/linux/ioport.h struct resource *lookup_resource(struct resource *root, resource_size_t start); start 202 include/linux/ioport.h int adjust_resource(struct resource *res, resource_size_t start, start 207 include/linux/ioport.h return res->end - res->start + 1; start 224 include/linux/ioport.h return r1->start <= r2->start && r1->end >= r2->end; start 229 include/linux/ioport.h #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) start 230 include/linux/ioport.h #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) start 231 include/linux/ioport.h #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) start 232 include/linux/ioport.h #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) start 233 include/linux/ioport.h #define request_mem_region_exclusive(start,n,name) \ start 234 include/linux/ioport.h __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) start 238 include/linux/ioport.h resource_size_t start, start 243 include/linux/ioport.h #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) start 244 include/linux/ioport.h #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) start 260 include/linux/ioport.h #define devm_request_region(dev,start,n,name) \ start 261 include/linux/ioport.h __devm_request_region(dev, &ioport_resource, (start), (n), (name)) start 262 include/linux/ioport.h #define devm_request_mem_region(dev,start,n,name) \ start 263 include/linux/ioport.h __devm_request_region(dev, &iomem_resource, (start), (n), (name)) start 266 include/linux/ioport.h struct resource *parent, resource_size_t start, start 269 include/linux/ioport.h #define devm_release_region(dev, start, n) \ start 270 include/linux/ioport.h __devm_release_region(dev, 
&ioport_resource, (start), (n)) start 271 include/linux/ioport.h #define devm_release_mem_region(dev, start, n) \ start 272 include/linux/ioport.h __devm_release_region(dev, &iomem_resource, (start), (n)) start 275 include/linux/ioport.h resource_size_t start, resource_size_t n); start 283 include/linux/ioport.h walk_mem_res(u64 start, u64 end, void *arg, start 286 include/linux/ioport.h walk_system_ram_res(u64 start, u64 end, void *arg, start 289 include/linux/ioport.h walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, start 295 include/linux/ioport.h return (r1->start <= r2->end && r1->end >= r2->start); start 76 include/linux/ipack.h phys_addr_t start; start 54 include/linux/irqbypass.h void (*start)(struct irq_bypass_producer *); start 79 include/linux/irqbypass.h void (*start)(struct irq_bypass_consumer *); start 747 include/linux/jbd2.h jbd2_time_diff(unsigned long start, unsigned long end) start 749 include/linux/jbd2.h if (end >= start) start 750 include/linux/jbd2.h return end - start; start 752 include/linux/jbd2.h return end + (MAX_JIFFY_OFFSET - start); start 1386 include/linux/jbd2.h unsigned long long start, int len, int bsize); start 221 include/linux/jump_label.h extern int jump_label_text_reserved(void *start, void *end); start 291 include/linux/jump_label.h static inline int jump_label_text_reserved(void *start, void *end) start 76 include/linux/kasan.h int kasan_add_zero_shadow(void *start, unsigned long size); start 77 include/linux/kasan.h void kasan_remove_zero_shadow(void *start, unsigned long size); start 148 include/linux/kasan.h static inline int kasan_add_zero_shadow(void *start, unsigned long size) start 152 include/linux/kasan.h static inline void kasan_remove_zero_shadow(void *start, start 205 include/linux/kexec.h u64 start, end; start 249 include/linux/kexec.h unsigned long start; start 239 include/linux/kprobes.h extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end); start 22 include/linux/ksm.h int ksm_madvise(struct vm_area_struct *vma, unsigned long start, start 71 include/linux/ksm.h static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, start 1013 include/linux/kvm_host.h int start = 0, end = slots->used_slots; start 1021 include/linux/kvm_host.h while (start < end) { start 1022 include/linux/kvm_host.h slot = start + (end - start) / 2; start 1027 include/linux/kvm_host.h start = slot + 1; start 1030 include/linux/kvm_host.h if (start < slots->used_slots && gfn >= memslots[start].base_gfn && start 1031 include/linux/kvm_host.h gfn < memslots[start].base_gfn + memslots[start].npages) { start 1032 include/linux/kvm_host.h atomic_set(&slots->lru_slot, start); start 1033 include/linux/kvm_host.h return &memslots[start]; start 1380 include/linux/kvm_host.h unsigned long start, unsigned long end); start 17 include/linux/libnvdimm.h u64 start; start 115 include/linux/libnvdimm.h u64 start; start 209 include/linux/libnvdimm.h void badrange_forget(struct badrange *badrange, phys_addr_t start, start 284 include/linux/lockdep.h extern void lockdep_free_key_range(void *start, unsigned long size); start 454 include/linux/lockdep.h # define lockdep_free_key_range(start, size) do { } while (0) start 108 include/linux/memblock.h phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, start 351 include/linux/memblock.h phys_addr_t start, phys_addr_t end); start 560 include/linux/memblock.h extern void early_memtest(phys_addr_t start, phys_addr_t end); start 562 
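
The include/linux/ioport.h entries above define the request/release helpers, all of which take a (start, n) pair against ioport_resource or iomem_resource. A kernel-context sketch (illustrative; the base address, size, and names are hypothetical) of claiming and mapping an MMIO window:

#include <linux/io.h>
#include <linux/ioport.h>

#define DEMO_MMIO_BASE	0xfed40000UL	/* hypothetical device window */
#define DEMO_MMIO_SIZE	0x1000

static void __iomem *demo_map_regs(void)
{
	void __iomem *regs;

	if (!request_mem_region(DEMO_MMIO_BASE, DEMO_MMIO_SIZE, "demo"))
		return NULL;		/* range already claimed by someone else */

	regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
	if (!regs)
		release_mem_region(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);

	return regs;
}
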
include/linux/memblock.h static inline void early_memtest(phys_addr_t start, phys_addr_t end) start 113 include/linux/memory.h int create_memory_block_devices(unsigned long start, unsigned long size); start 114 include/linux/memory.h void remove_memory_block_devices(unsigned long start, unsigned long size); start 120 include/linux/memory.h extern int walk_memory_blocks(unsigned long start, unsigned long size, start 114 include/linux/memory_hotplug.h extern int arch_add_memory(int nid, u64 start, u64 size, start 126 include/linux/memory_hotplug.h extern void arch_remove_memory(int nid, u64 start, u64 size, start 147 include/linux/memory_hotplug.h extern int memory_add_physaddr_to_nid(u64 start); start 149 include/linux/memory_hotplug.h static inline int memory_add_physaddr_to_nid(u64 start) start 317 include/linux/memory_hotplug.h extern int remove_memory(int nid, u64 start, u64 size); start 318 include/linux/memory_hotplug.h extern void __remove_memory(int nid, u64 start, u64 size); start 334 include/linux/memory_hotplug.h static inline int remove_memory(int nid, u64 start, u64 size) start 339 include/linux/memory_hotplug.h static inline void __remove_memory(int nid, u64 start, u64 size) {} start 343 include/linux/memory_hotplug.h extern int __add_memory(int nid, u64 start, u64 size); start 344 include/linux/memory_hotplug.h extern int add_memory(int nid, u64 start, u64 size); start 121 include/linux/mempolicy.h unsigned long start, end; start 266 include/linux/memstick.h void (*start)(struct memstick_dev *card); start 758 include/linux/mfd/wm8350/pmic.h int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start, start 766 include/linux/mfd/wm8350/pmic.h int wm8350_ldo_set_slot(struct wm8350 *wm8350, int ldo, u16 start, u16 stop); start 121 include/linux/mfd/wm8994/pdata.h int start; start 197 include/linux/migrate.h unsigned long start; start 637 include/linux/mlx5/driver.h unsigned int start; start 646 include/linux/mlx5/driver.h u64 start[MAX_PIN_NUM]; start 254 include/linux/mlx5/qp.h u8 start[2]; start 1461 include/linux/mm.h unsigned long start, unsigned long end); start 1494 include/linux/mm.h pgoff_t start, pgoff_t nr, bool even_cows); start 1514 include/linux/mm.h pgoff_t start, pgoff_t nr, bool even_cows) { } start 1533 include/linux/mm.h unsigned long start, unsigned long nr_pages, start 1536 include/linux/mm.h long get_user_pages(unsigned long start, unsigned long nr_pages, start 1539 include/linux/mm.h long get_user_pages_locked(unsigned long start, unsigned long nr_pages, start 1541 include/linux/mm.h long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, start 1544 include/linux/mm.h int get_user_pages_fast(unsigned long start, int nr_pages, start 1564 include/linux/mm.h int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, start 1596 include/linux/mm.h int get_kernel_page(unsigned long start, int write, struct page **pages); start 1628 include/linux/mm.h extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, start 1632 include/linux/mm.h struct vm_area_struct **pprev, unsigned long start, start 1638 include/linux/mm.h int __get_user_pages_fast(unsigned long start, int nr_pages, int write, start 2085 include/linux/mm.h extern unsigned long free_reserved_area(void *start, void *end, start 2099 include/linux/mm.h extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); start 2250 include/linux/mm.h unsigned long start, unsigned long last); start 2252 include/linux/mm.h unsigned long start, 
unsigned long last); start 2254 include/linux/mm.h #define vma_interval_tree_foreach(vma, root, start, last) \ start 2255 include/linux/mm.h for (vma = vma_interval_tree_iter_first(root, start, last); \ start 2256 include/linux/mm.h vma; vma = vma_interval_tree_iter_next(vma, start, last)) start 2264 include/linux/mm.h unsigned long start, unsigned long last); start 2266 include/linux/mm.h struct anon_vma_chain *node, unsigned long start, unsigned long last); start 2271 include/linux/mm.h #define anon_vma_interval_tree_foreach(avc, root, start, last) \ start 2272 include/linux/mm.h for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ start 2273 include/linux/mm.h avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) start 2277 include/linux/mm.h extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, start 2280 include/linux/mm.h static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, start 2283 include/linux/mm.h return __vma_adjust(vma, start, end, pgoff, insert, NULL); start 2305 include/linux/mm.h unsigned long start, start 2310 include/linux/mm.h if (((new - start) + (end_data - start_data)) > rlim) start 2520 include/linux/mm.h unsigned long start, unsigned long end) start 2522 include/linux/mm.h return (vma && vma->vm_start <= start && end <= vma->vm_end); start 2541 include/linux/mm.h unsigned long start, unsigned long end); start 2560 include/linux/mm.h int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); start 2785 include/linux/mm.h int vmemmap_populate_basepages(unsigned long start, unsigned long end, start 2787 include/linux/mm.h int vmemmap_populate(unsigned long start, unsigned long end, int node, start 2791 include/linux/mm.h void vmemmap_free(unsigned long start, unsigned long end, start 555 include/linux/mm_types.h unsigned long start, unsigned long end); start 557 include/linux/mm_types.h unsigned long start, unsigned long end); start 67 include/linux/mmu_notifier.h unsigned long start; start 111 include/linux/mmu_notifier.h unsigned long start, start 121 include/linux/mmu_notifier.h unsigned long start, start 217 include/linux/mmu_notifier.h unsigned long start, unsigned long end); start 281 include/linux/mmu_notifier.h unsigned long start, start 284 include/linux/mmu_notifier.h unsigned long start, start 294 include/linux/mmu_notifier.h unsigned long start, unsigned long end); start 311 include/linux/mmu_notifier.h unsigned long start, start 315 include/linux/mmu_notifier.h return __mmu_notifier_clear_flush_young(mm, start, end); start 320 include/linux/mmu_notifier.h unsigned long start, start 324 include/linux/mmu_notifier.h return __mmu_notifier_clear_young(mm, start, end); start 388 include/linux/mmu_notifier.h unsigned long start, unsigned long end) start 391 include/linux/mmu_notifier.h __mmu_notifier_invalidate_range(mm, start, end); start 411 include/linux/mmu_notifier.h unsigned long start, start 417 include/linux/mmu_notifier.h range->start = start; start 532 include/linux/mmu_notifier.h unsigned long start; start 537 include/linux/mmu_notifier.h unsigned long start, start 540 include/linux/mmu_notifier.h range->start = start; start 544 include/linux/mmu_notifier.h #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ start 545 include/linux/mmu_notifier.h _mmu_notifier_range_init(range, start, end) start 563 include/linux/mmu_notifier.h unsigned long start, start 602 include/linux/mmu_notifier.h unsigned long start, unsigned long end) start 844 
include/linux/mmzone.h void memory_present(int nid, unsigned long start, unsigned long end); start 846 include/linux/mmzone.h static inline void memory_present(int nid, unsigned long start, unsigned long end) {} start 1408 include/linux/mmzone.h void memory_present(int nid, unsigned long start, unsigned long end); start 562 include/linux/module.h const struct kernel_symbol *start, *stop; start 58 include/linux/mtd/flashchip.h unsigned long start; /* Offset within the map */ start 343 include/linux/mtd/map.h static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len) start 350 include/linux/mtd/map.h memcpy(dest+start, buf, len); start 352 include/linux/mtd/map.h for (i = start; i < start+len; i++) { start 361 include/linux/mtd/map.h orig.x[0] |= (unsigned long)buf[i-start] << bitpos; start 350 include/linux/mtd/mtd.h const u8 *oobbuf, int start, int nbytes); start 352 include/linux/mtd/mtd.h u8 *oobbuf, int start, int nbytes); start 356 include/linux/mtd/mtd.h const u8 *oobbuf, int start, int nbytes); start 358 include/linux/mtd/mtd.h u8 *oobbuf, int start, int nbytes); start 701 include/linux/mtd/nand.h #define nanddev_io_for_each_page(nand, start, req, iter) \ start 702 include/linux/mtd/nand.h for (nanddev_io_iter_init(nand, start, req, iter); \ start 156 include/linux/netdev_features.h static inline int find_next_netdev_feature(u64 feature, unsigned long start) start 161 include/linux/netdev_features.h feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); start 2710 include/linux/netdevice.h const void *start, unsigned int len) start 2714 include/linux/netdevice.h csum_partial(start, len, 0)); start 2822 include/linux/netdevice.h int start, int offset, start 2827 include/linux/netdevice.h size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); start 2832 include/linux/netdevice.h NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; start 2844 include/linux/netdevice.h start, offset); start 186 include/linux/netfilter/ipset/ip_set.h bool start); start 215 include/linux/netlink.h int (*start)(struct netlink_callback *); start 59 include/linux/oprofile.h int (*start)(void); start 172 include/linux/oprofile.h void oprofile_put_buff(unsigned long *buf, unsigned int start, start 348 include/linux/pagemap.h unsigned find_get_entries(struct address_space *mapping, pgoff_t start, start 351 include/linux/pagemap.h unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, start 355 include/linux/pagemap.h pgoff_t *start, unsigned int nr_pages, start 358 include/linux/pagemap.h return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, start 361 include/linux/pagemap.h unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, start 30 include/linux/pagevec.h pgoff_t start, unsigned nr_entries, start 35 include/linux/pagevec.h pgoff_t *start, pgoff_t end); start 38 include/linux/pagevec.h pgoff_t *start) start 40 include/linux/pagevec.h return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); start 60 include/linux/pagewalk.h int walk_page_range(struct mm_struct *mm, unsigned long start, start 60 include/linux/pci-epc.h int (*start)(struct pci_epc *epc); start 517 include/linux/pci.h resource_size_t start, start 720 include/linux/pci.h pci_bus_addr_t start; start 1343 include/linux/pci.h return region.start; start 1834 include/linux/pci.h #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) start 1867 include/linux/pci.h resource_size_t 
*start, resource_size_t *end); start 2159 include/linux/pci.h bool pci_acs_path_enabled(struct pci_dev *start, start 94 include/linux/perf/arm_pmu.h void (*start)(struct arm_pmu *); start 357 include/linux/perf_event.h void (*start) (struct perf_event *event, int flags); start 512 include/linux/perf_event.h unsigned long start; start 29 include/linux/platform_data/dmtimer-omap.h int (*start)(struct omap_dm_timer *timer); start 72 include/linux/platform_data/pwm_omap_dmtimer.h int (*start)(pwm_omap_dmtimer *timer); start 55 include/linux/platform_device.h platform_find_device_by_driver(struct device *start, start 77 include/linux/pm_domain.h int (*start)(struct device *dev); start 53 include/linux/pnp.h if (res->start == 0 && res->end == 0) start 65 include/linux/pnp.h return res->start; start 111 include/linux/pnp.h return res->start; start 155 include/linux/pnp.h return res->start; start 179 include/linux/pnp.h return res->start; start 464 include/linux/pnp.h int pnp_range_reserved(resource_size_t start, resource_size_t end); start 495 include/linux/pnp.h static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;} start 102 include/linux/pstore_ram.h struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, start 10 include/linux/purgatory.h unsigned long start; start 130 include/linux/qed/qed_fcoe_if.h int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks); start 228 include/linux/qed/qed_iscsi_if.h int (*start)(struct qed_dev *cdev, start 238 include/linux/qed/qed_ll2_if.h int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params); start 291 include/linux/qed/qed_ll2_if.h .start = NULL, start 269 include/linux/radix-tree.h radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) start 280 include/linux/radix-tree.h iter->next_index = start; start 437 include/linux/radix-tree.h #define radix_tree_for_each_slot(slot, root, iter, start) \ start 438 include/linux/radix-tree.h for (slot = radix_tree_iter_init(iter, start) ; \ start 453 include/linux/radix-tree.h #define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ start 454 include/linux/radix-tree.h for (slot = radix_tree_iter_init(iter, start) ; \ start 109 include/linux/random.h unsigned long randomize_page(unsigned long start, unsigned long range); start 6 include/linux/range.h u64 start; start 11 include/linux/range.h u64 start, u64 end); start 15 include/linux/range.h u64 start, u64 end); start 17 include/linux/range.h void subtract_range(struct range *range, int az, u64 start, u64 end); start 36 include/linux/relay.h void *start; /* start of channel buffer */ start 374 include/linux/remoteproc.h int (*start)(struct rproc *rproc); start 532 include/linux/remoteproc.h int (*start)(struct rproc_subdev *subdev); start 256 include/linux/rio_drv.h static inline void rio_init_mbox_res(struct resource *res, int start, int end) start 259 include/linux/rio_drv.h res->start = start; start 274 include/linux/rio_drv.h static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end) start 277 include/linux/rio_drv.h res->start = start; start 244 include/linux/sbitmap.h unsigned int start, start 251 include/linux/sbitmap.h if (start >= sb->depth) start 252 include/linux/sbitmap.h start = 0; start 253 include/linux/sbitmap.h index = SB_NR_TO_INDEX(sb, start); start 254 include/linux/sbitmap.h nr = SB_NR_TO_BIT(sb, start); start 373 include/linux/sctp.h __be16 start; start 33 include/linux/seq_file.h void * (*start) (struct seq_file *m, 
loff_t *pos); start 203 include/linux/seqlock.h static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) start 205 include/linux/seqlock.h return unlikely(s->sequence != start); start 218 include/linux/seqlock.h static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) start 221 include/linux/seqlock.h return __read_seqcount_retry(s, start); start 436 include/linux/seqlock.h static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) start 438 include/linux/seqlock.h return read_seqcount_retry(&sl->seqcount, start); start 37 include/linux/serio.h int (*start)(struct serio *); start 76 include/linux/shmem_fs.h extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); start 82 include/linux/shmem_fs.h pgoff_t start, pgoff_t end); start 3294 include/linux/skbuff.h __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, start 3299 include/linux/skbuff.h csum_partial(start, len, 0), off); start 3316 include/linux/skbuff.h const void *start, unsigned int len) start 3318 include/linux/skbuff.h __skb_postpull_rcsum(skb, start, len, 0); start 3322 include/linux/skbuff.h __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, start 3327 include/linux/skbuff.h csum_partial(start, len, 0), off); start 3340 include/linux/skbuff.h const void *start, unsigned int len) start 3342 include/linux/skbuff.h __skb_postpush_rcsum(skb, start, len, 0); start 4025 include/linux/skbuff.h u16 start, u16 offset) start 4028 include/linux/skbuff.h skb->csum_start = ((unsigned char *)ptr + start) - skb->head; start 4029 include/linux/skbuff.h skb->csum_offset = offset - start; start 4038 include/linux/skbuff.h int start, int offset, bool nopartial) start 4043 include/linux/skbuff.h skb_remcsum_adjust_partial(skb, ptr, start, offset); start 4052 include/linux/skbuff.h delta = remcsum_adjust(ptr, skb->csum, start, offset); start 4473 include/linux/skbuff.h bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); start 27 include/linux/skmsg.h u32 start; start 145 include/linux/skmsg.h static inline u32 sk_msg_iter_dist(u32 start, u32 end) start 147 include/linux/skmsg.h return end >= start ? 
end - start : end + (NR_MSG_FRAG_IDS - start); start 202 include/linux/skmsg.h return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; start 207 include/linux/skmsg.h return sk_msg_iter_dist(msg->sg.start, msg->sg.end); start 232 include/linux/skmsg.h struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); start 234 include/linux/skmsg.h if (msg->sg.copy[msg->sg.start]) { start 268 include/linux/skmsg.h static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start) start 270 include/linux/skmsg.h sk_msg_sg_copy(msg, start, true); start 273 include/linux/skmsg.h static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) start 275 include/linux/skmsg.h sk_msg_sg_copy(msg, start, false); start 527 include/linux/soc/ti/ti_sci_protocol.h u16 start; start 70 include/linux/sunrpc/xdr.h xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) start 72 include/linux/sunrpc/xdr.h buf->head[0].iov_base = start; start 867 include/linux/syscalls.h asmlinkage long sys_mprotect(unsigned long start, size_t len, start 869 include/linux/syscalls.h asmlinkage long sys_msync(unsigned long start, size_t len, int flags); start 870 include/linux/syscalls.h asmlinkage long sys_mlock(unsigned long start, size_t len); start 871 include/linux/syscalls.h asmlinkage long sys_munlock(unsigned long start, size_t len); start 874 include/linux/syscalls.h asmlinkage long sys_mincore(unsigned long start, size_t len, start 876 include/linux/syscalls.h asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); start 877 include/linux/syscalls.h asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, start 880 include/linux/syscalls.h asmlinkage long sys_mbind(unsigned long start, unsigned long len, start 973 include/linux/syscalls.h asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); start 983 include/linux/syscalls.h asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, start 32 include/linux/tboot.h u64 start; /* must be 64 byte -aligned */ start 112 include/linux/tee_drv.h unsigned long start); start 470 include/linux/tee_drv.h tee_client_open_context(struct tee_context *start, start 132 include/linux/tpm_eventlog.h #define TPM_MEMREMAP(start, size) NULL start 136 include/linux/tpm_eventlog.h #define TPM_MEMUNMAP(start, size) do{} while(0) start 277 include/linux/tty_driver.h void (*start)(struct tty_struct *tty); start 133 include/linux/u64_stats_sync.h unsigned int start) start 136 include/linux/u64_stats_sync.h return read_seqcount_retry(&syncp->seq, start); start 143 include/linux/u64_stats_sync.h unsigned int start) start 148 include/linux/u64_stats_sync.h return __u64_stats_fetch_retry(syncp, start); start 166 include/linux/u64_stats_sync.h unsigned int start) start 171 include/linux/u64_stats_sync.h return __u64_stats_fetch_retry(syncp, start); start 226 include/linux/uio.h size_t maxsize, unsigned maxpages, size_t *start); start 228 include/linux/uio.h size_t maxsize, size_t *start); start 61 include/linux/uio_driver.h unsigned long start; start 118 include/linux/uprobes.h extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); start 174 include/linux/uprobes.h uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 272 include/linux/usb/hcd.h int (*start) (struct usb_hcd *hcd); start 36 include/linux/usb/isp1362.h void (*clock) (struct device *dev, int start); start 70 include/linux/userfaultfd_k.h unsigned long start, start 74 
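
The include/linux/u64_stats_sync.h entries above are the retry-loop readers built on read_seqcount_retry(); 'start' is the sequence value sampled before the counters are copied. A kernel-context sketch (illustrative; the struct and function names are hypothetical):

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_stats_read(const struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
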
include/linux/userfaultfd_k.h unsigned long start, unsigned long end, start 127 include/linux/userfaultfd_k.h unsigned long start, start 134 include/linux/userfaultfd_k.h unsigned long start, unsigned long end, start 66 include/linux/virtio_net.h u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start); start 69 include/linux/virtio_net.h if (!skb_partial_csum_set(skb, start, off)) start 19 include/linux/vmacache.h unsigned long start, start 101 include/linux/vmalloc.h unsigned long start, unsigned long end, gfp_t gfp_mask, start 150 include/linux/vmalloc.h unsigned long start, unsigned long end); start 153 include/linux/vmalloc.h unsigned long start, unsigned long end, start 161 include/linux/vmalloc.h extern int map_kernel_range_noflush(unsigned long start, unsigned long size, start 174 include/linux/vmalloc.h map_kernel_range_noflush(unsigned long start, unsigned long size, start 67 include/linux/vringh.h u64 start, end_incl; start 52 include/linux/vt_kern.h void update_region(struct vc_data *vc, unsigned long start, int count); start 46 include/linux/watchdog.h int (*start)(struct watchdog_device *); start 397 include/linux/writeback.h pgoff_t start, pgoff_t end); start 404 include/linux/writeback.h pgoff_t start, pgoff_t end); start 359 include/linux/xarray.h unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, start 442 include/linux/xarray.h #define xa_for_each_start(xa, index, entry, start) \ start 443 include/linux/xarray.h for (index = start, \ start 107 include/linux/zorro.h #define zorro_resource_start(z) ((z)->resource.start) start 333 include/media/soc_camera.h static inline void soc_camera_limit_side(int *start, int *length, start 342 include/media/soc_camera.h if (*start < start_min) start 343 include/media/soc_camera.h *start = start_min; start 344 include/media/soc_camera.h else if (*start > start_min + length_max - *length) start 345 include/media/soc_camera.h *start = start_min + length_max - *length; start 36 include/media/videobuf2-memops.h struct frame_vector *vb2_create_framevec(unsigned long start, start 491 include/net/9p/9p.h u64 start; start 507 include/net/9p/9p.h u64 start; start 160 include/net/checksum.h int start, int offset) start 166 include/net/checksum.h csum = csum_sub(csum, csum_partial(ptr, start, 0)); start 15 include/net/fib_rules.h kuid_t start; start 159 include/net/fib_rules.h return range->start != 0 && range->end != 0; start 165 include/net/fib_rules.h return ntohs(port) >= a->start && start 171 include/net/fib_rules.h return a->start != 0 && a->end != 0 && a->end < 0xffff && start 172 include/net/fib_rules.h a->start <= a->end; start 178 include/net/fib_rules.h return a->start == b->start && start 143 include/net/genetlink.h int (*start)(struct netlink_callback *cb); start 1052 include/net/ipv6.h int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, start 436 include/net/iw_handler.h int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); start 3760 include/net/mac80211.h int (*start)(struct ieee80211_hw *hw); start 4394 include/net/mac80211.h int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start); start 4410 include/net/mac80211.h bool start) start 4415 include/net/mac80211.h ret = ieee80211_sta_ps_transition(sta, start); start 6158 include/net/mac80211.h u32 start; start 210 include/net/mac802154.h int (*start)(struct ieee802154_hw *hw); start 12 include/net/netfilter/nf_conntrack_timestamp.h u_int64_t start; start 454 include/net/netlabel.h u32 start, start 
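
The include/linux/xarray.h entries above include xa_for_each_start(), which visits the present entries whose index is at least the given start. A kernel-context sketch (illustrative; demo_xa and demo_walk_from() are hypothetical):

#include <linux/printk.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_xa);

static void demo_walk_from(unsigned long start)
{
	unsigned long index;
	void *entry;

	xa_for_each_start(&demo_xa, index, entry, start)
		pr_info("index %lu -> %p\n", index, entry);
}
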
599 include/net/netlabel.h u32 start, start 1685 include/net/netlink.h struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb); start 1690 include/net/netlink.h return start; start 1718 include/net/netlink.h static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start) start 1720 include/net/netlink.h start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start; start 1732 include/net/netlink.h static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) start 1734 include/net/netlink.h nlmsg_trim(skb, start); start 1751 include/net/netlink.h static inline int __nla_validate_nested(const struct nlattr *start, int maxtype, start 1756 include/net/netlink.h return __nla_validate(nla_data(start), nla_len(start), maxtype, policy, start 1761 include/net/netlink.h nl80211_validate_nested(const struct nlattr *start, int maxtype, start 1765 include/net/netlink.h return __nla_validate_nested(start, maxtype, policy, start 1770 include/net/netlink.h nla_validate_nested_deprecated(const struct nlattr *start, int maxtype, start 1774 include/net/netlink.h return __nla_validate_nested(start, maxtype, policy, start 241 include/net/regulatory.h #define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \ start 243 include/net/regulatory.h .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \ start 252 include/net/regulatory.h #define REG_RULE(start, end, bw, gain, eirp, reg_flags) \ start 253 include/net/regulatory.h REG_RULE_EXT(start, end, bw, gain, eirp, 0, reg_flags) start 80 include/net/sctp/tsnmap.h __u32 start; start 366 include/net/vxlan.h static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset) start 368 include/net/vxlan.h __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT); start 88 include/pcmcia/ss.h phys_addr_t start, stop; start 94 include/rdma/ib_umem_odp.h return umem_odp->interval_tree.start; start 148 include/rdma/ib_umem_odp.h typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end, start 155 include/rdma/ib_umem_odp.h u64 start, u64 end, start 2395 include/rdma/ib_verbs.h struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, start 2398 include/rdma/ib_verbs.h int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, start 2425 include/rdma/ib_verbs.h unsigned long start, unsigned long end); start 508 include/sound/hda_codec.h void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start); start 520 include/sound/hda_codec.h snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start) {} start 561 include/sound/hdaudio.h void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start, start 608 include/sound/hdaudio.h void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start); start 624 include/sound/hdaudio.h static inline void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start) start 24 include/sound/i2c.h void (*start)(struct snd_i2c_bus *bus); /* transfer start */ start 14 include/sound/sh_dac_audio.h void (*start)(struct dac_audio_pdata *pd); start 51 include/sound/timer.h int (*start) (struct snd_timer * timer); start 107 include/sound/wss.h void (*trigger) (struct snd_wss *chip, unsigned int what, int start); start 255 include/trace/events/btrfs.h __field( u64, start ) start 268 include/trace/events/btrfs.h __entry->start = map->start; start 284 include/trace/events/btrfs.h __entry->start, start 297 include/trace/events/btrfs.h u64 start, u64 len), start 299 include/trace/events/btrfs.h TP_ARGS(fs_info, existing, map, start, 
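
The include/net/netlink.h entries above pass the nlattr pointer returned by nla_nest_start() back into nla_nest_end() or nla_nest_cancel(), so the nested attribute's length can be patched or the message trimmed. A kernel-context sketch (illustrative; the attribute type numbers and function name are hypothetical):

#include <linux/errno.h>
#include <net/netlink.h>

static int demo_put_nested(struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start(skb, 1);		/* hypothetical container type */
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 2, 42)) {		/* hypothetical member attribute */
		nla_nest_cancel(skb, start);
		return -EMSGSIZE;
	}

	return nla_nest_end(skb, start);
}
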
len), start 306 include/trace/events/btrfs.h __field( u64, start ) start 311 include/trace/events/btrfs.h __entry->e_start = existing->start; start 313 include/trace/events/btrfs.h __entry->map_start = map->start; start 315 include/trace/events/btrfs.h __entry->start = start; start 322 include/trace/events/btrfs.h __entry->start, start 334 include/trace/events/btrfs.h struct btrfs_file_extent_item *fi, u64 start), start 336 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, start), start 366 include/trace/events/btrfs.h __entry->extent_start = start; start 367 include/trace/events/btrfs.h __entry->extent_end = (start + __entry->num_bytes); start 389 include/trace/events/btrfs.h struct btrfs_file_extent_item *fi, int slot, u64 start), start 391 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, slot, start), start 412 include/trace/events/btrfs.h __entry->extent_start = start; start 413 include/trace/events/btrfs.h __entry->extent_end = (start + btrfs_file_extent_ram_bytes(l, fi)); start 430 include/trace/events/btrfs.h struct btrfs_file_extent_item *fi, u64 start), start 432 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, start) start 439 include/trace/events/btrfs.h struct btrfs_file_extent_item *fi, u64 start), start 441 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, start) start 448 include/trace/events/btrfs.h struct btrfs_file_extent_item *fi, int slot, u64 start), start 450 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, slot, start) start 457 include/trace/events/btrfs.h struct btrfs_file_extent_item *fi, int slot, u64 start), start 459 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, slot, start) start 485 include/trace/events/btrfs.h __field( u64, start ) start 499 include/trace/events/btrfs.h __entry->start = ordered->start; start 519 include/trace/events/btrfs.h __entry->start, start 619 include/trace/events/btrfs.h TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate), start 621 include/trace/events/btrfs.h TP_ARGS(page, start, end, uptodate), start 626 include/trace/events/btrfs.h __field( u64, start ) start 635 include/trace/events/btrfs.h __entry->start = start; start 646 include/trace/events/btrfs.h __entry->start, start 1002 include/trace/events/btrfs.h __entry->buf_start = buf->start; start 1004 include/trace/events/btrfs.h __entry->cow_start = cow->start; start 1126 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 len), start 1128 include/trace/events/btrfs.h TP_ARGS(fs_info, start, len), start 1131 include/trace/events/btrfs.h __field( u64, start ) start 1136 include/trace/events/btrfs.h __entry->start = start; start 1142 include/trace/events/btrfs.h __entry->start, start 1148 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 len), start 1150 include/trace/events/btrfs.h TP_ARGS(fs_info, start, len) start 1155 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 len), start 1157 include/trace/events/btrfs.h TP_ARGS(fs_info, start, len) start 1188 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, start 1191 include/trace/events/btrfs.h TP_ARGS(block_group, start, len), start 1196 include/trace/events/btrfs.h __field( u64, start ) start 1203 include/trace/events/btrfs.h __entry->start = start; start 1213 include/trace/events/btrfs.h __entry->start, __entry->len) start 1218 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, start 1221 
include/trace/events/btrfs.h TP_ARGS(block_group, start, len) start 1226 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, start 1229 include/trace/events/btrfs.h TP_ARGS(block_group, start, len) start 1234 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, start 1237 include/trace/events/btrfs.h TP_ARGS(block_group, start, bytes, empty_size, min_bytes), start 1242 include/trace/events/btrfs.h __field( u64, start ) start 1251 include/trace/events/btrfs.h __entry->start = start; start 1261 include/trace/events/btrfs.h BTRFS_GROUP_FLAGS), __entry->start, start 1293 include/trace/events/btrfs.h __field( u64, start ) start 1302 include/trace/events/btrfs.h __entry->start = cluster->window_start; start 1313 include/trace/events/btrfs.h BTRFS_GROUP_FLAGS), __entry->start, start 1505 include/trace/events/btrfs.h TP_PROTO(const struct inode *inode, u64 start, u64 len, start 1508 include/trace/events/btrfs.h TP_ARGS(inode, start, len, reserved, op), start 1513 include/trace/events/btrfs.h __field( u64, start ) start 1523 include/trace/events/btrfs.h __entry->start = start; start 1530 include/trace/events/btrfs.h __entry->rootid, __entry->ino, __entry->start, __entry->len, start 1539 include/trace/events/btrfs.h TP_PROTO(const struct inode *inode, u64 start, u64 len, start 1542 include/trace/events/btrfs.h TP_ARGS(inode, start, len, reserved, op) start 1547 include/trace/events/btrfs.h TP_PROTO(const struct inode *inode, u64 start, u64 len, start 1550 include/trace/events/btrfs.h TP_ARGS(inode, start, len, reserved, op) start 1893 include/trace/events/btrfs.h u64 start, u64 len, unsigned set_bits), start 1895 include/trace/events/btrfs.h TP_ARGS(tree, start, len, set_bits), start 1901 include/trace/events/btrfs.h __field( u64, start ) start 1918 include/trace/events/btrfs.h __entry->start = start; start 1926 include/trace/events/btrfs.h __entry->rootid, __entry->start, __entry->len, start 1932 include/trace/events/btrfs.h u64 start, u64 len, unsigned clear_bits), start 1934 include/trace/events/btrfs.h TP_ARGS(tree, start, len, clear_bits), start 1940 include/trace/events/btrfs.h __field( u64, start ) start 1957 include/trace/events/btrfs.h __entry->start = start; start 1965 include/trace/events/btrfs.h __entry->rootid, __entry->start, __entry->len, start 1971 include/trace/events/btrfs.h u64 start, u64 len, unsigned set_bits, unsigned clear_bits), start 1973 include/trace/events/btrfs.h TP_ARGS(tree, start, len, set_bits, clear_bits), start 1979 include/trace/events/btrfs.h __field( u64, start ) start 1997 include/trace/events/btrfs.h __entry->start = start; start 2006 include/trace/events/btrfs.h __entry->rootid, __entry->start, __entry->len, start 2027 include/trace/events/btrfs.h __entry->block = eb->start; start 2068 include/trace/events/btrfs.h __entry->block = eb->start; start 124 include/trace/events/erofs.h __field(pgoff_t, start ) start 132 include/trace/events/erofs.h __entry->start = page->index; start 139 include/trace/events/erofs.h (unsigned long)__entry->start, start 1088 include/trace/events/ext4.h ext4_grpblk_t start, start 1091 include/trace/events/ext4.h TP_ARGS(sb, inode, group, start, len), start 1104 include/trace/events/ext4.h __entry->result_start = start; start 1121 include/trace/events/ext4.h ext4_grpblk_t start, start 1124 include/trace/events/ext4.h TP_ARGS(sb, inode, group, start, len) start 1132 include/trace/events/ext4.h ext4_grpblk_t start, start 1135 
include/trace/events/ext4.h TP_ARGS(sb, inode, group, start, len) start 1797 include/trace/events/ext4.h ext4_grpblk_t start, start 1800 include/trace/events/ext4.h TP_ARGS(sb, group, start, len), start 1806 include/trace/events/ext4.h __field( int, start ) start 1814 include/trace/events/ext4.h __entry->start = start; start 1820 include/trace/events/ext4.h __entry->group, __entry->start, __entry->len) start 1827 include/trace/events/ext4.h ext4_grpblk_t start, start 1830 include/trace/events/ext4.h TP_ARGS(sb, group, start, len) start 1837 include/trace/events/ext4.h ext4_grpblk_t start, start 1840 include/trace/events/ext4.h TP_ARGS(sb, group, start, len) start 1912 include/trace/events/ext4.h ext4_fsblk_t start), start 1914 include/trace/events/ext4.h TP_ARGS(inode, lblk, len, start), start 1921 include/trace/events/ext4.h __field( ext4_fsblk_t, start ) start 1929 include/trace/events/ext4.h __entry->start = start; start 1937 include/trace/events/ext4.h (unsigned long long) __entry->start) start 2106 include/trace/events/ext4.h TP_PROTO(struct inode *inode, ext4_lblk_t start, start 2110 include/trace/events/ext4.h TP_ARGS(inode, start, ex, pc), start 2115 include/trace/events/ext4.h __field( ext4_lblk_t, start ) start 2127 include/trace/events/ext4.h __entry->start = start; start 2140 include/trace/events/ext4.h (unsigned) __entry->start, start 2173 include/trace/events/ext4.h TP_PROTO(struct inode *inode, ext4_lblk_t start, start 2176 include/trace/events/ext4.h TP_ARGS(inode, start, end, depth), start 2181 include/trace/events/ext4.h __field( ext4_lblk_t, start ) start 2189 include/trace/events/ext4.h __entry->start = start; start 2197 include/trace/events/ext4.h (unsigned) __entry->start, start 2203 include/trace/events/ext4.h TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end, start 2206 include/trace/events/ext4.h TP_ARGS(inode, start, end, depth, pc, eh_entries), start 2211 include/trace/events/ext4.h __field( ext4_lblk_t, start ) start 2223 include/trace/events/ext4.h __entry->start = start; start 2237 include/trace/events/ext4.h (unsigned) __entry->start, start 852 include/trace/events/f2fs.h __field(loff_t, start) start 860 include/trace/events/f2fs.h __entry->start = start_pos; start 867 include/trace/events/f2fs.h __entry->start, start 1378 include/trace/events/f2fs.h __field(pgoff_t, start) start 1385 include/trace/events/f2fs.h __entry->start = page->index; start 1391 include/trace/events/f2fs.h (unsigned long)__entry->start, start 228 include/trace/events/power.h TP_PROTO(const char *action, int val, bool start), start 230 include/trace/events/power.h TP_ARGS(action, val, start), start 235 include/trace/events/power.h __field(bool, start) start 241 include/trace/events/power.h __entry->start = start; start 245 include/trace/events/power.h (__entry->start)?"begin":"end") start 93 include/trace/events/sunvnet.h TP_PROTO(int lsid, int rsid, int start, int err), start 95 include/trace/events/sunvnet.h TP_ARGS(lsid, rsid, start, err), start 100 include/trace/events/sunvnet.h __field(int, start) start 107 include/trace/events/sunvnet.h __entry->start = start; start 112 include/trace/events/sunvnet.h __entry->lsid, __entry->rsid, __entry->start, start 429 include/uapi/drm/i915_drm.h int start; /* agp offset */ start 657 include/uapi/drm/i915_drm.h int start; start 383 include/uapi/drm/mga_drm.h unsigned int start; start 276 include/uapi/drm/r128_drm.h int start; start 310 include/uapi/drm/r128_drm.h int start; start 419 include/uapi/drm/radeon_drm.h unsigned int 
start; start 635 include/uapi/drm/radeon_drm.h int start; start 691 include/uapi/drm/radeon_drm.h int start; start 749 include/uapi/drm/radeon_drm.h int start; start 187 include/uapi/drm/savage_drm.h unsigned short start; /* first register */ start 195 include/uapi/drm/savage_drm.h unsigned short start; /* first vertex in DMA/vertex buffer */ start 294 include/uapi/linux/bcache.h struct bkey start[0]; start 359 include/uapi/linux/bcache.h struct bkey start[0]; start 52 include/uapi/linux/blkpg.h long long start; /* starting offset in bytes */ start 95 include/uapi/linux/blkzoned.h __u64 start; /* Zone start sector */ start 167 include/uapi/linux/btrfs.h __u64 start; /* in */ start 211 include/uapi/linux/btrfs.h struct btrfs_ioctl_dev_replace_start_params start; start 543 include/uapi/linux/btrfs.h __u64 start; start 40 include/uapi/linux/dlm_plock.h __u64 start; start 282 include/uapi/linux/fb.h __u32 start; /* First entry */ start 34 include/uapi/linux/fib_rules.h __u32 start; start 39 include/uapi/linux/fib_rules.h __u16 start; start 62 include/uapi/linux/fs.h __u64 start; start 251 include/uapi/linux/fuse.h uint64_t start; start 328 include/uapi/linux/hdreg.h unsigned long start; start 181 include/uapi/linux/msdos_fs.h __le16 time,date,start;/* time, date and first cluster */ start 193 include/uapi/linux/msdos_fs.h __le16 start; /* starting cluster number, 0 in long slots */ start 51 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFA_NEST_END(skb, start) \ start 52 include/uapi/linux/netfilter/nfnetlink_compat.h ({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ start 54 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFA_NEST_CANCEL(skb, start) \ start 55 include/uapi/linux/netfilter/nfnetlink_compat.h ({ if (start) \ start 56 include/uapi/linux/netfilter/nfnetlink_compat.h skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ start 4886 include/uapi/linux/nl80211.h __u32 start, offset, len; start 102 include/uapi/linux/ptp_clock.h struct ptp_clock_time start; /* Absolute start time. 
*/ start 196 include/uapi/linux/uhid.h struct uhid_start_req start; start 100 include/uapi/linux/userfaultfd.h __u64 start; start 185 include/uapi/linux/userfaultfd.h __u64 start; start 511 include/uapi/linux/vfio.h __u32 start; start 740 include/uapi/linux/vfio.h __u64 start; start 2014 include/uapi/linux/videodev2.h } start; start 2036 include/uapi/linux/videodev2.h __s32 start[2]; start 21 include/uapi/linux/virtio_iommu.h __le64 start; start 26 include/uapi/linux/virtio_iommu.h __le32 start; start 129 include/uapi/linux/virtio_iommu.h __le64 start; start 18 include/uapi/linux/virtio_pmem.h __u64 start; start 27 include/uapi/mtd/mtd-abi.h __u32 start; start 32 include/uapi/mtd/mtd-abi.h __u64 start; start 37 include/uapi/mtd/mtd-abi.h __u32 start; start 43 include/uapi/mtd/mtd-abi.h __u64 start; start 85 include/uapi/mtd/mtd-abi.h __u64 start; start 146 include/uapi/mtd/mtd-abi.h __u32 start; start 341 include/uapi/rdma/ib_user_verbs.h __aligned_u64 start; start 360 include/uapi/rdma/ib_user_verbs.h __aligned_u64 start; start 119 include/uapi/sound/sfnt_info.h int start, end; /* sample offset correction */ start 164 include/uapi/sound/sfnt_info.h int start, end; /* start & end offset */ start 21 include/vdso/helpers.h u32 start) start 27 include/vdso/helpers.h return seq != start; start 302 include/xen/grant_table.h static inline unsigned int gnttab_count_grant(unsigned int start, start 305 include/xen/grant_table.h return XEN_PFN_UP(xen_offset_in_page(start) + len); start 37 init/calibrate.c unsigned long pre_start, start, post_start; start 72 init/calibrate.c read_current_timer(&start); start 75 init/calibrate.c pre_start = start; start 76 init/calibrate.c read_current_timer(&start); start 98 init/calibrate.c if (start >= post_end) start 102 init/calibrate.c start, post_end); start 103 init/calibrate.c if (start < post_end && pre_start != 0 && pre_end != 0 && start 33 init/do_mounts_initrd.c phys_addr_t start; start 37 init/do_mounts_initrd.c start = memparse(p, &endp); start 41 init/do_mounts_initrd.c phys_initrd_start = start; start 530 init/initramfs.c void __weak free_initrd_mem(unsigned long start, unsigned long end) start 532 init/initramfs.c free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, start 539 init/initramfs.c unsigned long crashk_start = (unsigned long)__va(crashk_res.start); start 525 ipc/shm.c static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) start 531 ipc/shm.c return sfd->file->f_op->fsync(sfd->file, start, end, datasync); start 857 ipc/util.c .start = sysvipc_proc_start, start 3128 kernel/bpf/btf.c const char *start, *end; start 3131 kernel/bpf/btf.c start = btf->nohdr_data + hdr->str_off; start 3132 kernel/bpf/btf.c end = start + hdr->str_len; start 3140 kernel/bpf/btf.c start[0] || end[-1]) { start 3145 kernel/bpf/btf.c btf->strings = start; start 801 kernel/bpf/core.c u32 size, hole, start, pages; start 824 kernel/bpf/core.c start = (get_random_int() % hole) & ~(alignment - 1); start 827 kernel/bpf/core.c *image_ptr = &hdr->image[start]; start 243 kernel/bpf/inode.c .start = map_seq_start, start 1385 kernel/bpf/syscall.c unsigned int start; start 1390 kernel/bpf/syscall.c start = u64_stats_fetch_begin_irq(&st->syncp); start 1393 kernel/bpf/syscall.c } while (u64_stats_fetch_retry_irq(&st->syncp, start)); start 1083 kernel/bpf/verifier.c return ((struct bpf_subprog_info *)a)->start - start 1084 kernel/bpf/verifier.c ((struct bpf_subprog_info *)b)->start; start 1115 kernel/bpf/verifier.c 
env->subprog_info[env->subprog_cnt++].start = off; start 1151 kernel/bpf/verifier.c subprog[env->subprog_cnt].start = insn_cnt; start 1155 kernel/bpf/verifier.c verbose(env, "func#%d @%d\n", i, subprog[i].start); start 1158 kernel/bpf/verifier.c subprog_start = subprog[cur_subprog].start; start 1159 kernel/bpf/verifier.c subprog_end = subprog[cur_subprog + 1].start; start 1186 kernel/bpf/verifier.c subprog_end = subprog[cur_subprog + 1].start; start 2625 kernel/bpf/verifier.c subprog_end = subprog[idx + 1].start; start 2667 kernel/bpf/verifier.c int start = idx + insn->imm + 1, subprog; start 2669 kernel/bpf/verifier.c subprog = find_subprog(env, start); start 2672 kernel/bpf/verifier.c start); start 6561 kernel/bpf/verifier.c if (env->subprog_info[i].start != krecord[i].insn_off) { start 6597 kernel/bpf/verifier.c env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; start 6695 kernel/bpf/verifier.c if (linfo[i].insn_off == sub[s].start) { start 6698 kernel/bpf/verifier.c } else if (sub[s].start < linfo[i].insn_off) { start 8204 kernel/bpf/verifier.c if (env->subprog_info[i].start <= off) start 8206 kernel/bpf/verifier.c env->subprog_info[i].start += len - 1; start 8236 kernel/bpf/verifier.c if (env->subprog_info[i].start >= off) start 8240 kernel/bpf/verifier.c if (env->subprog_info[j].start >= off + cnt) start 8245 kernel/bpf/verifier.c if (env->subprog_info[j].start != off + cnt) start 8274 kernel/bpf/verifier.c if (env->subprog_info[i].start == off) start 8280 kernel/bpf/verifier.c env->subprog_info[i].start -= cnt; start 8790 kernel/bpf/verifier.c subprog_end = env->subprog_info[i + 1].start; start 304 kernel/cgroup/cgroup.c static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end, start 311 kernel/cgroup/cgroup.c ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM); start 45 kernel/crash_core.c unsigned long long start, end = ULLONG_MAX, size; start 48 kernel/crash_core.c start = memparse(cur, &tmp); start 68 kernel/crash_core.c if (end <= start) { start 92 kernel/crash_core.c if (system_ram >= start && system_ram < end) { start 45 kernel/delayacct.c static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, start 48 kernel/delayacct.c s64 ns = ktime_get_ns() - *start; start 244 kernel/dma/coherent.c int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; start 250 kernel/dma/coherent.c unsigned long pfn = mem->pfn_base + start + off; start 1121 kernel/dma/debug.c static inline bool overlap(void *addr, unsigned long len, void *start, void *end) start 1125 kernel/dma/debug.c unsigned long a2 = (unsigned long)start; start 1217 kernel/dma/debug.c u64 start, end, boundary = dma_get_seg_boundary(dev); start 1231 kernel/dma/debug.c start = sg_dma_address(sg); start 1232 kernel/dma/debug.c end = start + sg_dma_len(sg) - 1; start 1233 kernel/dma/debug.c if ((start ^ end) & ~boundary) start 1235 kernel/dma/debug.c start, end, boundary); start 176 kernel/dma/remap.c bool dma_in_atomic_pool(void *start, size_t size) start 181 kernel/dma/remap.c return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); start 206 kernel/dma/remap.c bool dma_free_from_pool(void *start, size_t size) start 208 kernel/dma/remap.c if (!dma_in_atomic_pool(start, size)) start 210 kernel/dma/remap.c gen_pool_free(atomic_pool, (unsigned long)start, size); start 2870 kernel/events/core.c event->pmu->start(event, 0); start 3695 kernel/events/core.c event->pmu->start(event, PERF_EF_RELOAD); start 3737 kernel/events/core.c event->pmu->start(event, 0); start 
3762 kernel/events/core.c event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); start 5104 kernel/events/core.c event->pmu->start(event, PERF_EF_RELOAD); start 6894 kernel/events/core.c event->addr_filter_ranges[count].start = 0; start 7370 kernel/events/core.c u64 start; start 7596 kernel/events/core.c fr->start = vma->vm_start; start 7599 kernel/events/core.c fr->start = vma->vm_start + filter->offset - off; start 7600 kernel/events/core.c fr->size = min(vma->vm_end - fr->start, filter->size); start 7682 kernel/events/core.c .start = vma->vm_start, start 8666 kernel/events/core.c .start = perf_swevent_start, start 8810 kernel/events/core.c .start = perf_swevent_start, start 8861 kernel/events/core.c .start = perf_swevent_start, start 8920 kernel/events/core.c .start = perf_swevent_start, start 9264 kernel/events/core.c event->addr_filter_ranges[count].start = 0; start 9269 kernel/events/core.c event->addr_filter_ranges[count].start = filter->offset; start 9344 kernel/events/core.c char *start, *orig, *filename = NULL; start 9354 kernel/events/core.c while ((start = strsep(&fstr, " ,\n")) != NULL) { start 9362 kernel/events/core.c if (!*start) start 9372 kernel/events/core.c token = match_token(start, if_tokens, args); start 9737 kernel/events/core.c .start = cpu_clock_event_start, start 9818 kernel/events/core.c .start = task_clock_event_start, start 656 kernel/events/hw_breakpoint.c .start = hw_breakpoint_start, start 1301 kernel/events/uprobes.c unsigned long start, unsigned long end, start 1309 kernel/events/uprobes.c min = vaddr_to_offset(vma, start); start 1310 kernel/events/uprobes.c max = min + (end - start) - 1; start 1410 kernel/events/uprobes.c vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 1418 kernel/events/uprobes.c min = vaddr_to_offset(vma, start); start 1419 kernel/events/uprobes.c max = min + (end - start) - 1; start 1431 kernel/events/uprobes.c void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) start 1443 kernel/events/uprobes.c if (vma_has_uprobes(vma, start, end)) start 211 kernel/fail_function.c .start = fei_seq_start, start 134 kernel/gcov/fs.c .start = gcov_seq_start, start 466 kernel/irq/irqdesc.c static int alloc_descs(unsigned int start, unsigned int cnt, int node, start 495 kernel/irq/irqdesc.c desc = alloc_desc(start + i, node, flags, mask, owner); start 498 kernel/irq/irqdesc.c irq_insert_desc(start + i, desc); start 499 kernel/irq/irqdesc.c irq_sysfs_add(start + i, desc); start 500 kernel/irq/irqdesc.c irq_add_debugfs_entry(start + i, desc); start 502 kernel/irq/irqdesc.c bitmap_set(allocated_irqs, start, cnt); start 503 kernel/irq/irqdesc.c return start; start 507 kernel/irq/irqdesc.c free_desc(start + i); start 597 kernel/irq/irqdesc.c static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, start 604 kernel/irq/irqdesc.c struct irq_desc *desc = irq_to_desc(start + i); start 608 kernel/irq/irqdesc.c bitmap_set(allocated_irqs, start, cnt); start 609 kernel/irq/irqdesc.c return start; start 769 kernel/irq/irqdesc.c int start, ret; start 789 kernel/irq/irqdesc.c start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, start 792 kernel/irq/irqdesc.c if (irq >=0 && start != irq) start 795 kernel/irq/irqdesc.c if (start + cnt > nr_irqs) { start 796 kernel/irq/irqdesc.c ret = irq_expand_nr_irqs(start + cnt); start 800 kernel/irq/irqdesc.c ret = alloc_descs(start, cnt, node, affinity, owner); start 113 kernel/irq/matrix.c unsigned int area, start = 
m->alloc_start; start 118 kernel/irq/matrix.c area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0); start 384 kernel/irq/timings.c int index, i, period_max, count, start, min = INT_MAX; start 412 kernel/irq/timings.c start = irqs->count < IRQ_TIMINGS_SIZE ? start 422 kernel/irq/timings.c int index = (start + i) & IRQ_TIMINGS_MASK; start 711 kernel/irq/timings.c int index, start, i, count, period_max; start 730 kernel/irq/timings.c start = count < IRQ_TIMINGS_SIZE ? 0 : start 736 kernel/irq/timings.c int index = (start + i) & IRQ_TIMINGS_MASK; start 832 kernel/irq/timings.c int start = count >= IRQ_TIMINGS_SIZE ? count - IRQ_TIMINGS_SIZE : 0; start 850 kernel/irq/timings.c ots += start; start 851 kernel/irq/timings.c oirq += start; start 80 kernel/jump_label.c jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) start 88 kernel/jump_label.c size = (((unsigned long)stop - (unsigned long)start) start 90 kernel/jump_label.c sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn); start 309 kernel/jump_label.c static int addr_conflict(struct jump_entry *entry, void *start, void *end) start 312 kernel/jump_label.c jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start) start 319 kernel/jump_label.c struct jump_entry *iter_stop, void *start, void *end) start 325 kernel/jump_label.c if (addr_conflict(iter, start, end)) start 539 kernel/jump_label.c static int __jump_label_mod_text_reserved(void *start, void *end) start 544 kernel/jump_label.c mod = __module_text_address((unsigned long)start); start 554 kernel/jump_label.c start, end); start 773 kernel/jump_label.c int jump_label_text_reserved(void *start, void *end) start 776 kernel/jump_label.c __stop___jump_table, start, end); start 782 kernel/jump_label.c ret = __jump_label_mod_text_reserved(start, end); start 624 kernel/kallsyms.c .start = s_start, start 50 kernel/kexec.c if ((entry < phys_to_boot_phys(crashk_res.start)) || start 60 kernel/kexec.c image->start = entry; start 68 kernel/kexec.c image->control_page = crashk_res.start; start 60 kernel/kexec_core.c .start = 0, start 67 kernel/kexec_core.c .start = 0, start 245 kernel/kexec_core.c if ((mstart < phys_to_boot_phys(crashk_res.start)) || start 282 kernel/kexec_core.c unsigned long start, start 292 kernel/kexec_core.c if ((end > mstart) && (start < mend)) start 991 kernel/kexec_core.c if (crashk_res.end != crashk_res.start) start 1009 kernel/kexec_core.c unsigned long start, end; start 1019 kernel/kexec_core.c start = crashk_res.start; start 1021 kernel/kexec_core.c old_size = (end == 0) ? 
0 : end - start + 1; start 1033 kernel/kexec_core.c start = roundup(start, KEXEC_CRASH_MEM_ALIGN); start 1034 kernel/kexec_core.c end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN); start 1038 kernel/kexec_core.c if ((start == end) && (crashk_res.parent != NULL)) start 1041 kernel/kexec_core.c ram_res->start = end; start 330 kernel/kexec_file.c image->control_page = crashk_res.start; start 460 kernel/kexec_file.c static int locate_mem_hole_top_down(unsigned long start, unsigned long end, start 473 kernel/kexec_file.c if (temp_start < start || temp_start < kbuf->buf_min) start 498 kernel/kexec_file.c static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end, start 504 kernel/kexec_file.c temp_start = max(start, kbuf->buf_min); start 535 kernel/kexec_file.c u64 start = res->start, end = res->end; start 536 kernel/kexec_file.c unsigned long sz = end - start + 1; start 542 kernel/kexec_file.c if (end < kbuf->buf_min || start > kbuf->buf_max) start 550 kernel/kexec_file.c return locate_mem_hole_top_down(start, end, kbuf); start 551 kernel/kexec_file.c return locate_mem_hole_bottom_up(start, end, kbuf); start 574 kernel/kexec_file.c res.start = mstart; start 588 kernel/kexec_file.c res.start = mstart; start 621 kernel/kexec_file.c crashk_res.start, crashk_res.end, start 788 kernel/kexec_file.c sha_regions[j].start = ksegment->mem; start 913 kernel/kexec_file.c kbuf->image->start = pi->ehdr->e_entry; start 935 kernel/kexec_file.c kbuf->image->start -= sechdrs[i].sh_addr; start 936 kernel/kexec_file.c kbuf->image->start += kbuf->mem + offset; start 1173 kernel/kexec_file.c unsigned long long start, end; start 1177 kernel/kexec_file.c start = mem->ranges[i].start; start 1180 kernel/kexec_file.c if (mstart > end || mend < start) start 1184 kernel/kexec_file.c if (mstart < start) start 1185 kernel/kexec_file.c mstart = start; start 1190 kernel/kexec_file.c if (mstart == start && mend == end) { start 1191 kernel/kexec_file.c mem->ranges[i].start = 0; start 1196 kernel/kexec_file.c mem->ranges[j].start = start 1197 kernel/kexec_file.c mem->ranges[j+1].start; start 1206 kernel/kexec_file.c if (mstart > start && mend < end) { start 1209 kernel/kexec_file.c temp_range.start = mend + 1; start 1211 kernel/kexec_file.c } else if (mstart != start) start 1214 kernel/kexec_file.c mem->ranges[i].start = mend + 1; start 1234 kernel/kexec_file.c mem->ranges[j].start = temp_range.start; start 1316 kernel/kexec_file.c mstart = mem->ranges[i].start; start 14 kernel/kexec_internal.h unsigned long start, unsigned long end); start 2167 kernel/kprobes.c int kprobe_add_area_blacklist(unsigned long start, unsigned long end) start 2172 kernel/kprobes.c for (entry = start; entry < end; entry += ret) { start 2195 kernel/kprobes.c static int __init populate_kprobe_blacklist(unsigned long *start, start 2202 kernel/kprobes.c for (iter = start; iter < end; iter++) { start 2402 kernel/kprobes.c .start = kprobe_seq_start, start 2450 kernel/kprobes.c .start = kprobe_blacklist_seq_start, start 738 kernel/locking/lockdep.c unsigned long start = (unsigned long) &_stext, start 748 kernel/locking/lockdep.c if ((addr >= start) && (addr < end)) start 4900 kernel/locking/lockdep.c static inline int within(const void *addr, void *start, unsigned long size) start 4902 kernel/locking/lockdep.c return addr >= start && addr < start + size; start 4992 kernel/locking/lockdep.c static void __lockdep_free_key_range(struct pending_free *pf, void *start, start 5003 kernel/locking/lockdep.c if (!within(class->key, start, size) && 
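The lockdep and module entries immediately around this point (kernel/locking/lockdep.c line 4902 and kernel/module.c line 4028 in the listing) quote the same one-line address-range containment check: an address is "within" a region if it is at or past the region start and before start + size. As an illustration only, here is a minimal user-space C sketch of that pattern; the within() name and the comparison are taken from the quoted lines, while the uintptr_t casts, the test buffer and main() are additions made purely so the snippet builds and runs on its own, and none of it is kernel code.

    /*
     * Stand-alone user-space restatement of the range check quoted from
     * kernel/locking/lockdep.c:4902 and kernel/module.c:4028 in the listing.
     * The casts, the test buffer and main() exist only for this sketch.
     */
    #include <stdio.h>
    #include <stdint.h>

    static inline int within(const void *addr, const void *start, unsigned long size)
    {
            uintptr_t a = (uintptr_t)addr;
            uintptr_t s = (uintptr_t)start;

            /* Same predicate as the quoted helpers: start <= addr < start + size. */
            return a >= s && a < s + size;
    }

    int main(void)
    {
            char region[64];

            printf("%d\n", within(&region[10], region, sizeof(region)));  /* 1: inside           */
            printf("%d\n", within(region + 64, region, sizeof(region)));  /* 0: one past the end */
            return 0;
    }

As the listing shows, the two kernel copies differ only in argument types (const void * in lockdep, unsigned long plus a cast in module.c); the predicate itself is identical.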
start 5004 kernel/locking/lockdep.c !within(class->name, start, size)) start 5019 kernel/locking/lockdep.c static void lockdep_free_key_range_reg(void *start, unsigned long size) start 5030 kernel/locking/lockdep.c __lockdep_free_key_range(pf, start, size); start 5047 kernel/locking/lockdep.c static void lockdep_free_key_range_imm(void *start, unsigned long size) start 5056 kernel/locking/lockdep.c __lockdep_free_key_range(pf, start, size); start 5062 kernel/locking/lockdep.c void lockdep_free_key_range(void *start, unsigned long size) start 5067 kernel/locking/lockdep.c lockdep_free_key_range_imm(start, size); start 5069 kernel/locking/lockdep.c lockdep_free_key_range_reg(start, size); start 98 kernel/locking/lockdep_proc.c .start = l_start, start 156 kernel/locking/lockdep_proc.c .start = lc_start, start 581 kernel/locking/lockdep_proc.c .start = ls_start, start 110 kernel/locking/qspinlock_stat.h u64 start = sched_clock(); start 112 kernel/locking/qspinlock_stat.h per_cpu(pv_kick_time, cpu) = start; start 114 kernel/locking/qspinlock_stat.h this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start); start 129 kernel/module.c unsigned long start, end; start 131 kernel/module.c start = __mod_tree_val(n); start 132 kernel/module.c if (val < start) start 135 kernel/module.c end = start + __mod_tree_size(n); start 528 kernel/module.c fsa->sym = &syms->start[symnum]; start 573 kernel/module.c sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, start 577 kernel/module.c sym - syms->start, data)) start 701 kernel/module.c void *start = per_cpu_ptr(mod->percpu, cpu); start 704 kernel/module.c if (va >= start && va < start + mod->percpu_size) { start 706 kernel/module.c *can_addr = (unsigned long) (va - start); start 1961 kernel/module.c int (*set_memory)(unsigned long start, int num_pages)) start 1971 kernel/module.c int (*set_memory)(unsigned long start, int num_pages)) start 1981 kernel/module.c int (*set_memory)(unsigned long start, int num_pages)) start 1991 kernel/module.c int (*set_memory)(unsigned long start, int num_pages)) start 2598 kernel/module.c const struct kernel_symbol *start, start 2601 kernel/module.c return bsearch(name, start, stop - start, start 4026 kernel/module.c static inline int within(unsigned long addr, void *start, unsigned long size) start 4028 kernel/module.c return ((void *)addr >= start && (void *)addr < start + size); start 4375 kernel/module.c .start = m_start, start 239 kernel/power/hibernate.c void swsusp_show_speed(ktime_t start, ktime_t stop, start 248 kernel/power/hibernate.c diff = ktime_sub(stop, start); start 968 kernel/power/hibernate.c char *start = buf; start 996 kernel/power/hibernate.c return buf-start; start 56 kernel/power/main.c ktime_t start; start 59 kernel/power/main.c start = ktime_get(); start 61 kernel/power/main.c elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start)); start 37 kernel/power/process.c ktime_t start, end, elapsed; start 42 kernel/power/process.c start = ktime_get_boottime(); start 85 kernel/power/process.c elapsed = ktime_sub(end, start); start 501 kernel/power/snapshot.c unsigned long start, start 508 kernel/power/snapshot.c pages = end - start; start 515 kernel/power/snapshot.c zone->start_pfn = start; start 562 kernel/power/snapshot.c unsigned long start; start 604 kernel/power/snapshot.c if (&ext->hook == list || zone_end < ext->start) { start 613 kernel/power/snapshot.c new_ext->start = zone_start; start 620 kernel/power/snapshot.c if (zone_start < ext->start) start 621 kernel/power/snapshot.c 
ext->start = zone_start; start 628 kernel/power/snapshot.c if (zone_end < cur->start) start 662 kernel/power/snapshot.c ext->start, ext->end); start 1705 kernel/power/snapshot.c ktime_t start, stop; start 1709 kernel/power/snapshot.c start = ktime_get(); start 1840 kernel/power/snapshot.c swsusp_show_speed(start, stop, pages, "Allocated"); start 124 kernel/power/swap.c unsigned long start; start 140 kernel/power/swap.c if (swap_offset < ext->start) { start 142 kernel/power/swap.c if (swap_offset == ext->start - 1) { start 143 kernel/power/swap.c ext->start--; start 164 kernel/power/swap.c ext->start = swap_offset; start 206 kernel/power/swap.c for (offset = ext->start; offset <= ext->end; offset++) start 539 kernel/power/swap.c ktime_t start; start 550 kernel/power/swap.c start = ktime_get(); start 569 kernel/power/swap.c swsusp_show_speed(start, stop, nr_to_write, "Wrote"); start 675 kernel/power/swap.c ktime_t start; start 768 kernel/power/swap.c start = ktime_get(); start 855 kernel/power/swap.c swsusp_show_speed(start, stop, nr_to_write, "Wrote"); start 1055 kernel/power/swap.c ktime_t start; start 1069 kernel/power/swap.c start = ktime_get(); start 1096 kernel/power/swap.c swsusp_show_speed(start, stop, nr_to_read, "Read"); start 1162 kernel/power/swap.c ktime_t start; start 1285 kernel/power/swap.c start = ktime_get(); start 1448 kernel/power/swap.c swsusp_show_speed(start, stop, nr_to_read, "Read"); start 121 kernel/printk/printk_safe.c static int printk_safe_flush_buffer(const char *start, size_t len) start 126 kernel/printk/printk_safe.c c = start; start 127 kernel/printk/printk_safe.c end = start + len; start 133 kernel/printk/printk_safe.c printk_safe_flush_line(start, c - start + 1); start 134 kernel/printk/printk_safe.c start = ++c; start 146 kernel/printk/printk_safe.c printk_safe_flush_line(start, c - start); start 147 kernel/printk/printk_safe.c start = c++; start 157 kernel/printk/printk_safe.c if (start < end && !header) { start 160 kernel/printk/printk_safe.c printk_safe_flush_line(start, end - start); start 11 kernel/range.c int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) start 13 kernel/range.c if (start >= end) start 20 kernel/range.c range[nr_range].start = start; start 29 kernel/range.c u64 start, u64 end) start 33 kernel/range.c if (start >= end) start 43 kernel/range.c common_start = max(range[i].start, start); start 49 kernel/range.c start = min(range[i].start, start); start 54 kernel/range.c range[nr_range - 1].start = 0; start 61 kernel/range.c return add_range(range, az, nr_range, start, end); start 64 kernel/range.c void subtract_range(struct range *range, int az, u64 start, u64 end) start 68 kernel/range.c if (start >= end) start 75 kernel/range.c if (start <= range[j].start && end >= range[j].end) { start 76 kernel/range.c range[j].start = 0; start 81 kernel/range.c if (start <= range[j].start && end < range[j].end && start 82 kernel/range.c range[j].start < end) { start 83 kernel/range.c range[j].start = end; start 88 kernel/range.c if (start > range[j].start && end >= range[j].end && start 89 kernel/range.c range[j].end > start) { start 90 kernel/range.c range[j].end = start; start 94 kernel/range.c if (start > range[j].start && end < range[j].end) { start 102 kernel/range.c range[i].start = end; start 107 kernel/range.c range[j].end = start; start 118 kernel/range.c if (r1->start < r2->start) start 120 kernel/range.c if (r1->start > r2->start) start 140 kernel/range.c range[i].start = range[k].start; start 142 kernel/range.c 
range[k].start = 0; start 785 kernel/rcu/rcutorture.c static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) start 787 kernel/rcu/rcutorture.c if (end - start > test_boost_duration * HZ - HZ / 2) { start 51 kernel/relay.c page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); start 177 kernel/relay.c buf->start = relay_alloc_buf(buf, &chan->alloc_size); start 178 kernel/relay.c if (!buf->start) start 212 kernel/relay.c if (likely(buf->start)) { start 213 kernel/relay.c vunmap(buf->start); start 367 kernel/relay.c buf->data = buf->start; start 780 kernel/relay.c new = buf->start + new_subbuf * buf->chan->subbuf_size; start 1050 kernel/relay.c write_subbuf = (buf->data - buf->start) / subbuf_size; start 1148 kernel/relay.c from = buf->start + read_start; start 32 kernel/resource.c .start = 0, start 40 kernel/resource.c .start = 0, start 109 kernel/resource.c unsigned long long start, end; start 118 kernel/resource.c start = r->start; start 121 kernel/resource.c start = end = 0; start 126 kernel/resource.c width, start, start 133 kernel/resource.c .start = r_start, start 187 kernel/resource.c resource_size_t start = new->start; start 191 kernel/resource.c if (end < start) start 193 kernel/resource.c if (start < root->start) start 200 kernel/resource.c if (!tmp || tmp->start > end) { start 207 kernel/resource.c if (tmp->end < start) start 260 kernel/resource.c tmp->start = 0; start 341 kernel/resource.c static int find_next_iomem_res(resource_size_t start, resource_size_t end, start 351 kernel/resource.c if (start >= end) start 358 kernel/resource.c if (p->start > end) { start 364 kernel/resource.c if (p->end < start) start 385 kernel/resource.c res->start = max(start, p->start); start 395 kernel/resource.c static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, start 403 kernel/resource.c while (start < end && start 404 kernel/resource.c !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) { start 409 kernel/resource.c start = res.end + 1; start 431 kernel/resource.c int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, start 434 kernel/resource.c return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func); start 445 kernel/resource.c int walk_system_ram_res(u64 start, u64 end, void *arg, start 450 kernel/resource.c return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true, start 458 kernel/resource.c int walk_mem_res(u64 start, u64 end, void *arg, start 463 kernel/resource.c return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true, start 478 kernel/resource.c resource_size_t start, end; start 484 kernel/resource.c start = (u64) start_pfn << PAGE_SHIFT; start 487 kernel/resource.c while (start < end && start 488 kernel/resource.c !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, start 490 kernel/resource.c pfn = PFN_UP(res.start); start 496 kernel/resource.c start = res.end + 1; start 536 kernel/resource.c int region_intersects(resource_size_t start, size_t size, unsigned long flags, start 543 kernel/resource.c res.start = start; start 544 kernel/resource.c res.end = start + size - 1; start 576 kernel/resource.c return avail->start; start 582 kernel/resource.c if (res->start < min) start 583 kernel/resource.c res->start = min; start 600 kernel/resource.c tmp.start = root->start; start 605 kernel/resource.c if (this && this->start == root->start) { start 606 kernel/resource.c tmp.start = (this == old) ? 
old->start : this->end + 1; start 611 kernel/resource.c tmp.end = (this == old) ? this->end : this->start - 1; start 615 kernel/resource.c if (tmp.end < tmp.start) start 622 kernel/resource.c avail.start = ALIGN(tmp.start, constraint->align); start 625 kernel/resource.c if (avail.start >= tmp.start) { start 627 kernel/resource.c alloc.start = constraint->alignf(constraint->alignf_data, &avail, start 629 kernel/resource.c alloc.end = alloc.start + size - 1; start 630 kernel/resource.c if (alloc.start <= alloc.end && start 632 kernel/resource.c new->start = alloc.start; start 642 kernel/resource.c tmp.start = this->end + 1; start 682 kernel/resource.c old->start = new.start; start 693 kernel/resource.c old->start = new.start; start 763 kernel/resource.c struct resource *lookup_resource(struct resource *root, resource_size_t start) start 769 kernel/resource.c if (res->start == start) start 795 kernel/resource.c if ((first->start > new->start) || (first->end < new->end)) start 797 kernel/resource.c if ((first->start == new->start) && (first->end == new->end)) start 803 kernel/resource.c if (next->start < new->start || next->end > new->end) start 807 kernel/resource.c if (next->sibling->start > new->end) start 899 kernel/resource.c if (conflict->start < new->start) start 900 kernel/resource.c new->start = conflict->start; start 935 kernel/resource.c static int __adjust_resource(struct resource *res, resource_size_t start, start 939 kernel/resource.c resource_size_t end = start + size - 1; start 945 kernel/resource.c if ((start < parent->start) || (end > parent->end)) start 948 kernel/resource.c if (res->sibling && (res->sibling->start <= end)) start 955 kernel/resource.c if (start <= tmp->end) start 961 kernel/resource.c if ((tmp->start < start) || (tmp->end > end)) start 964 kernel/resource.c res->start = start; start 982 kernel/resource.c int adjust_resource(struct resource *res, resource_size_t start, start 988 kernel/resource.c result = __adjust_resource(res, start, size); start 995 kernel/resource.c __reserve_region_with_split(struct resource *root, resource_size_t start, start 1008 kernel/resource.c res->start = start; start 1025 kernel/resource.c if (conflict->start <= res->start && start 1033 kernel/resource.c if (conflict->start > res->start) { start 1035 kernel/resource.c res->end = conflict->start - 1; start 1043 kernel/resource.c next_res->start = conflict->end + 1; start 1049 kernel/resource.c res->start = conflict->end + 1; start 1056 kernel/resource.c reserve_region_with_split(struct resource *root, resource_size_t start, start 1062 kernel/resource.c if (root->start > start || root->end < end) { start 1064 kernel/resource.c (unsigned long long)start, (unsigned long long)end, start 1066 kernel/resource.c if (start > root->end || end < root->start) start 1071 kernel/resource.c if (start < root->start) start 1072 kernel/resource.c start = root->start; start 1074 kernel/resource.c (unsigned long long)start, start 1080 kernel/resource.c __reserve_region_with_split(root, start, end, name); start 1096 kernel/resource.c return res->start; start 1124 kernel/resource.c resource_size_t start, resource_size_t n, start 1134 kernel/resource.c res->start = start; start 1135 kernel/resource.c res->end = start + n - 1; start 1191 kernel/resource.c void __release_region(struct resource *parent, resource_size_t start, start 1198 kernel/resource.c end = start + n - 1; start 1207 kernel/resource.c if (res->start <= start && res->end >= end) { start 1212 kernel/resource.c if (res->start != start || 
res->end != end) start 1227 kernel/resource.c "<%016llx-%016llx>\n", (unsigned long long)start, start 1254 kernel/resource.c resource_size_t start, resource_size_t size) start 1262 kernel/resource.c end = start + size - 1; start 1263 kernel/resource.c if ((start < parent->start) || (end > parent->end)) start 1273 kernel/resource.c if (res->start >= end) start 1277 kernel/resource.c if (res->start > start || res->end < end) { start 1306 kernel/resource.c if (res->start == start && res->end == end) { start 1311 kernel/resource.c } else if (res->start == start && res->end != end) { start 1315 kernel/resource.c } else if (res->start != start && res->end == end) { start 1317 kernel/resource.c ret = __adjust_resource(res, res->start, start 1318 kernel/resource.c start - res->start); start 1326 kernel/resource.c new_res->start = end + 1; start 1334 kernel/resource.c ret = __adjust_resource(res, res->start, start 1335 kernel/resource.c start - res->start); start 1426 kernel/resource.c resource_size_t start; start 1434 kernel/resource.c __release_region(this->parent, this->start, this->n); start 1442 kernel/resource.c this->start == match->start && this->n == match->n; start 1447 kernel/resource.c resource_size_t start, resource_size_t n, const char *name) start 1458 kernel/resource.c dr->start = start; start 1461 kernel/resource.c res = __request_region(parent, start, n, name, 0); start 1472 kernel/resource.c resource_size_t start, resource_size_t n) start 1474 kernel/resource.c struct region_devres match_data = { parent, start, n }; start 1476 kernel/resource.c __release_region(parent, start, n); start 1515 kernel/resource.c res->start = io_start; start 1544 kernel/resource.c if (p->start >= addr + size) start 1548 kernel/resource.c if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && start 1600 kernel/resource.c if (p->start >= addr + size) start 1657 kernel/resource.c for (; addr > size && addr >= base->start; addr -= size) { start 900 kernel/sched/cpufreq_schedutil.c .start = sugov_start, start 799 kernel/sched/debug.c .start = sched_debug_start, start 2491 kernel/sched/fair.c unsigned long start, end; start 2536 kernel/sched/fair.c start = mm->numa_scan_offset; start 2546 kernel/sched/fair.c vma = find_vma(mm, start); start 2549 kernel/sched/fair.c start = 0; start 2576 kernel/sched/fair.c start = max(start, vma->vm_start); start 2577 kernel/sched/fair.c end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); start 2579 kernel/sched/fair.c nr_pte_updates = change_prot_numa(vma, start, end); start 2590 kernel/sched/fair.c pages -= (end - start) >> PAGE_SHIFT; start 2591 kernel/sched/fair.c virtpages -= (end - start) >> PAGE_SHIFT; start 2593 kernel/sched/fair.c start = end; start 2609 kernel/sched/fair.c mm->numa_scan_offset = start; start 117 kernel/sched/stats.c .start = schedstat_start, start 365 kernel/time/alarmtimer.c void alarm_start(struct alarm *alarm, ktime_t start) start 371 kernel/time/alarmtimer.c alarm->node.expires = start; start 385 kernel/time/alarmtimer.c void alarm_start_relative(struct alarm *alarm, ktime_t start) start 389 kernel/time/alarmtimer.c start = ktime_add_safe(start, base->gettime()); start 390 kernel/time/alarmtimer.c alarm_start(alarm, start); start 344 kernel/time/posix-cpu-timers.c bool start) start 351 kernel/time/posix-cpu-timers.c if (start) start 1491 kernel/time/timer.c unsigned pos, start = offset + clk; start 1494 kernel/time/timer.c pos = find_next_bit(base->pending_map, end, start); start 1496 kernel/time/timer.c return pos - start; start 1498 
kernel/time/timer.c pos = find_next_bit(base->pending_map, start, offset); start 1499 kernel/time/timer.c return pos < start ? pos + LVL_SIZE - start : -1; start 363 kernel/time/timer_list.c .start = timer_list_start, start 631 kernel/trace/blktrace.c static int __blk_trace_startstop(struct request_queue *q, int start) start 646 kernel/trace/blktrace.c if (start) { start 673 kernel/trace/blktrace.c int blk_trace_startstop(struct request_queue *q, int start) start 678 kernel/trace/blktrace.c ret = __blk_trace_startstop(q, start); start 701 kernel/trace/blktrace.c int ret, start = 0; start 722 kernel/trace/blktrace.c start = 1; start 725 kernel/trace/blktrace.c ret = __blk_trace_startstop(q, start); start 1592 kernel/trace/blktrace.c .start = blk_tracer_start, start 346 kernel/trace/fgraph.c int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; start 355 kernel/trace/fgraph.c start = 0; start 364 kernel/trace/fgraph.c if (start == end) { start 376 kernel/trace/fgraph.c t->ret_stack = ret_stack_list[start++]; start 383 kernel/trace/fgraph.c for (i = start; i < end; i++) start 409 kernel/trace/ftrace.c struct ftrace_profile_page *start; start 458 kernel/trace/ftrace.c if (!stat || !stat->start) start 461 kernel/trace/ftrace.c return function_stat_next(&stat->start->records[0], 0); start 576 kernel/trace/ftrace.c pg = stat->pages = stat->start; start 616 kernel/trace/ftrace.c pg = stat->start = stat->pages; start 630 kernel/trace/ftrace.c pg = stat->start; start 639 kernel/trace/ftrace.c stat->start = NULL; start 1549 kernel/trace/ftrace.c unsigned long ftrace_location_range(unsigned long start, unsigned long end) start 1555 kernel/trace/ftrace.c key.ip = start; start 1560 kernel/trace/ftrace.c start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) start 1596 kernel/trace/ftrace.c int ftrace_text_reserved(const void *start, const void *end) start 1600 kernel/trace/ftrace.c ret = ftrace_location_range((unsigned long)start, start 2909 kernel/trace/ftrace.c u64 start, stop; start 2914 kernel/trace/ftrace.c start = ftrace_now(raw_smp_processor_id()); start 2954 kernel/trace/ftrace.c ftrace_update_time = stop - start; start 3480 kernel/trace/ftrace.c .start = t_start, start 5213 kernel/trace/ftrace.c .start = g_start, start 5575 kernel/trace/ftrace.c unsigned long *start, start 5587 kernel/trace/ftrace.c count = end - start; start 5592 kernel/trace/ftrace.c sort(start, count, sizeof(*start), start 5623 kernel/trace/ftrace.c p = start; start 5946 kernel/trace/ftrace.c unsigned long start, unsigned long end) start 5955 kernel/trace/ftrace.c mod_map->start_addr = start; start 6059 kernel/trace/ftrace.c unsigned long start, unsigned long end) start 6120 kernel/trace/ftrace.c unsigned long start = (unsigned long)(start_ptr); start 6133 kernel/trace/ftrace.c key.ip = start; start 6144 kernel/trace/ftrace.c mod_map = allocate_ftrace_mod_map(mod, start, end); start 6148 kernel/trace/ftrace.c start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) start 6190 kernel/trace/ftrace.c void *start = (void *)(&__init_begin); start 6193 kernel/trace/ftrace.c ftrace_free_mem(NULL, start, end); start 6565 kernel/trace/ftrace.c .start = fpid_start, start 28 kernel/trace/preemptirq_delay_test.c u64 start, end; start 29 kernel/trace/preemptirq_delay_test.c start = trace_clock_local(); start 34 kernel/trace/preemptirq_delay_test.c } while ((end - start) < (time * 1000)); start 3018 kernel/trace/ring_buffer.c struct buffer_page *start; start 3033 kernel/trace/ring_buffer.c start = bpage; start 3040 
kernel/trace/ring_buffer.c } while (bpage != start); start 4064 kernel/trace/trace.c .start = s_start, start 4372 kernel/trace/trace.c .start = t_start, start 5077 kernel/trace/trace.c .start = saved_tgids_start, start 5157 kernel/trace/trace.c .start = saved_cmdlines_start, start 5325 kernel/trace/trace.c .start = eval_map_start, start 5357 kernel/trace/trace.c trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, start 5365 kernel/trace/trace.c stop = start + len; start 5397 kernel/trace/trace.c for (map = start; (unsigned long)map < (unsigned long)stop; map++) { start 5415 kernel/trace/trace.c struct trace_eval_map **start, int len) { } start 5419 kernel/trace/trace.c struct trace_eval_map **start, int len) start 5426 kernel/trace/trace.c map = start; start 5430 kernel/trace/trace.c trace_insert_eval_map_file(mod, start, len); start 7147 kernel/trace/trace.c .start = tracing_err_log_seq_start, start 8193 kernel/trace/trace.c if (tr->current_trace->start) start 8194 kernel/trace/trace.c tr->current_trace->start(tr); start 470 kernel/trace/trace.h void (*start)(struct trace_array *tr); start 639 kernel/trace/trace.h static __always_inline int trace_test_and_set_recursion(int start, int max) start 648 kernel/trace/trace.h bit = trace_get_context_bit() + start; start 39 kernel/trace/trace_benchmark.c u64 start; start 53 kernel/trace/trace_benchmark.c start = trace_clock_local(); start 60 kernel/trace/trace_benchmark.c delta = stop - start; start 129 kernel/trace/trace_dynevent.c .start = dyn_event_seq_start, start 1287 kernel/trace/trace_events.c .start = f_start, start 1660 kernel/trace/trace_events.c .start = t_start, start 1667 kernel/trace/trace_events.c .start = s_start, start 1674 kernel/trace/trace_events.c .start = p_start, start 2374 kernel/trace/trace_events.c #define for_each_event(event, start, end) \ start 2375 kernel/trace/trace_events.c for (event = start; \ start 2383 kernel/trace/trace_events.c struct trace_event_call **call, **start, **end; start 2395 kernel/trace/trace_events.c start = mod->trace_events; start 2398 kernel/trace/trace_events.c for_each_event(call, start, end) { start 700 kernel/trace/trace_events_hist.c char buf[4], *end, *start; start 704 kernel/trace/trace_events_hist.c start = strstr(type, "char["); start 705 kernel/trace/trace_events_hist.c if (start == NULL) start 707 kernel/trace/trace_events_hist.c start += sizeof("char[") - 1; start 710 kernel/trace/trace_events_hist.c if (!end || end < start) start 713 kernel/trace/trace_events_hist.c len = end - start; start 717 kernel/trace/trace_events_hist.c strncpy(buf, start, len); start 1492 kernel/trace/trace_events_hist.c .start = dyn_event_seq_start, start 170 kernel/trace/trace_events_trigger.c .start = trigger_start, start 271 kernel/trace/trace_functions.c .start = function_trace_start, start 170 kernel/trace/trace_hwlat.c time_type start, t1, t2, last_t2; start 188 kernel/trace/trace_hwlat.c start = time_get(); /* start timestamp */ start 208 kernel/trace/trace_hwlat.c total = time_to_us(time_sub(t2, start)); /* sample width */ start 617 kernel/trace/trace_hwlat.c .start = hwlat_tracer_start, start 645 kernel/trace/trace_irqsoff.c .start = irqsoff_tracer_start, start 695 kernel/trace/trace_irqsoff.c .start = irqsoff_tracer_start, start 730 kernel/trace/trace_irqsoff.c .start = irqsoff_tracer_start, start 952 kernel/trace/trace_kprobe.c .start = dyn_event_seq_start, start 1010 kernel/trace/trace_kprobe.c .start = dyn_event_seq_start, start 66 kernel/trace/trace_mmiotrace.c 
resource_size_t start, end; start 73 kernel/trace/trace_mmiotrace.c start = dev->resource[i].start; start 75 kernel/trace/trace_mmiotrace.c (unsigned long long)(start | start 79 kernel/trace/trace_mmiotrace.c start = dev->resource[i].start; start 82 kernel/trace/trace_mmiotrace.c dev->resource[i].start < dev->resource[i].end ? start 83 kernel/trace/trace_mmiotrace.c (unsigned long long)(end - start) + 1 : 0); start 281 kernel/trace/trace_mmiotrace.c .start = mmio_trace_start, start 53 kernel/trace/trace_printk.c void hold_module_trace_bprintk_format(const char **start, const char **end) start 59 kernel/trace/trace_printk.c if (start != end) start 63 kernel/trace/trace_printk.c for (iter = start; iter < end; iter++) { start 93 kernel/trace/trace_printk.c const char **start = mod->trace_bprintk_fmt_start; start 94 kernel/trace/trace_printk.c const char **end = start + mod->num_trace_bprintk_fmt; start 97 kernel/trace/trace_printk.c hold_module_trace_bprintk_format(start, end); start 343 kernel/trace/trace_printk.c .start = t_start, start 749 kernel/trace/trace_sched_wakeup.c .start = wakeup_tracer_start, start 769 kernel/trace/trace_sched_wakeup.c .start = wakeup_tracer_start, start 789 kernel/trace/trace_sched_wakeup.c .start = wakeup_tracer_start, start 996 kernel/trace/trace_selftest.c trace->start(tr); start 157 kernel/trace/trace_stack.c unsigned long this_size, flags; unsigned long *p, *top, *start; start 212 kernel/trace/trace_stack.c start = stack; start 214 kernel/trace/trace_stack.c (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); start 227 kernel/trace/trace_stack.c p = start; start 240 kernel/trace/trace_stack.c start = p + 1; start 471 kernel/trace/trace_stack.c .start = t_start, start 228 kernel/trace/trace_stat.c .start = stat_seq_start, start 83 kernel/trace/trace_syscalls.c struct syscall_metadata **start; start 88 kernel/trace/trace_syscalls.c start = __start_syscalls_metadata; start 95 kernel/trace/trace_syscalls.c for ( ; start < stop; start++) { start 96 kernel/trace/trace_syscalls.c if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name)) start 97 kernel/trace/trace_syscalls.c return *start; start 756 kernel/trace/trace_uprobe.c .start = dyn_event_seq_start, start 811 kernel/trace/trace_uprobe.c .start = dyn_event_seq_start, start 996 kernel/trace/tracing_map.c unsigned i, start = 0, n_sub = 1; start 1019 kernel/trace/tracing_map.c start = i + 1; start 1025 kernel/trace/tracing_map.c sort(&entries[start], n_sub, start 1030 kernel/trace/tracing_map.c start = i + 1; start 680 kernel/user_namespace.c return seq->op->start(seq, pos); start 689 kernel/user_namespace.c .start = uid_m_start, start 696 kernel/user_namespace.c .start = gid_m_start, start 703 kernel/user_namespace.c .start = projid_m_start, start 270 lib/bitmap.c void __bitmap_set(unsigned long *map, unsigned int start, int len) start 272 lib/bitmap.c unsigned long *p = map + BIT_WORD(start); start 273 lib/bitmap.c const unsigned int size = start + len; start 274 lib/bitmap.c int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); start 275 lib/bitmap.c unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); start 291 lib/bitmap.c void __bitmap_clear(unsigned long *map, unsigned int start, int len) start 293 lib/bitmap.c unsigned long *p = map + BIT_WORD(start); start 294 lib/bitmap.c const unsigned int size = start + len; start 295 lib/bitmap.c int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); start 296 lib/bitmap.c unsigned long mask_to_clear = 
BITMAP_FIRST_WORD_MASK(start); start 327 lib/bitmap.c unsigned long start, start 334 lib/bitmap.c index = find_next_zero_bit(map, size, start); start 344 lib/bitmap.c start = i + 1; start 508 lib/bitmap.c unsigned int start; start 517 lib/bitmap.c unsigned int start; start 522 lib/bitmap.c for (start = r->start; start <= r->end; start += r->group_len) start 523 lib/bitmap.c bitmap_set(bitmap, start, min(r->end - start + 1, r->off)); start 530 lib/bitmap.c if (r->start > r->end || r->group_len == 0 || r->off > r->group_len) start 580 lib/bitmap.c str = bitmap_getnum(str, &r->start); start 610 lib/bitmap.c r->end = r->start; start 375 lib/btree.c static int getfill(struct btree_geo *geo, unsigned long *node, int start) start 379 lib/btree.c for (i = start; i < geo->no_pairs; i++) start 209 lib/bug.c static void clear_once_table(struct bug_entry *start, struct bug_entry *end) start 213 lib/bug.c for (bug = start; bug < end; bug++) start 77 lib/cpumask.c int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) start 84 lib/cpumask.c if (wrap && n < start && next >= start) { start 553 lib/crc32test.c u32 start; /* random 6 bit offset in buf */ start 681 lib/crc32test.c test[i].start, test[i].length); start 691 lib/crc32test.c test[i].start, test[i].length)) start 719 lib/crc32test.c crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start, start 726 lib/crc32test.c test[i].start, len1); start 727 lib/crc32test.c crc2 = __crc32c_le(0, test_buf + test[i].start + start 763 lib/crc32test.c test[i].start, test[i].length); start 766 lib/crc32test.c test[i].start, test[i].length); start 776 lib/crc32test.c test[i].start, test[i].length)) start 780 lib/crc32test.c test[i].start, test[i].length)) start 809 lib/crc32test.c crc_full = crc32_le(test[i].crc, test_buf + test[i].start, start 816 lib/crc32test.c test[i].start, len1); start 817 lib/crc32test.c crc2 = crc32_le(0, test_buf + test[i].start + start 168 lib/devres.c if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) { start 173 lib/devres.c dest_ptr = devm_ioremap(dev, res->start, size); start 176 lib/devres.c devm_release_mem_region(dev, res->start, size); start 57 lib/dim/dim.c void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, start 61 lib/dim/dim.c u32 delta_us = ktime_us_delta(end->time, start->time); start 62 lib/dim/dim.c u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); start 64 lib/dim/dim.c start->byte_ctr); start 66 lib/dim/dim.c start->comp_ctr); start 850 lib/dynamic_debug.c .start = ddebug_proc_start, start 57 lib/error-inject.c static void populate_error_injection_list(struct error_injection_entry *start, start 66 lib/error-inject.c for (iter = start; iter < end; iter++) { start 198 lib/error-inject.c .start = ei_seq_start, start 64 lib/extable.c void sort_extable(struct exception_table_entry *start, start 67 lib/extable.c sort(start, finish - start, sizeof(struct exception_table_entry), start 32 lib/find_bit.c unsigned long start, unsigned long invert) start 36 lib/find_bit.c if (unlikely(start >= nbits)) start 39 lib/find_bit.c tmp = addr1[start / BITS_PER_LONG]; start 41 lib/find_bit.c tmp &= addr2[start / BITS_PER_LONG]; start 45 lib/find_bit.c tmp &= BITMAP_FIRST_WORD_MASK(start); start 46 lib/find_bit.c start = round_down(start, BITS_PER_LONG); start 49 lib/find_bit.c start += BITS_PER_LONG; start 50 lib/find_bit.c if (start >= nbits) start 53 lib/find_bit.c tmp = addr1[start / BITS_PER_LONG]; start 55 lib/find_bit.c tmp &= addr2[start / 
BITS_PER_LONG]; start 59 lib/find_bit.c return min(start + __ffs(tmp), nbits); start 155 lib/find_bit.c unsigned long start, unsigned long invert) start 159 lib/find_bit.c if (unlikely(start >= nbits)) start 162 lib/find_bit.c tmp = addr1[start / BITS_PER_LONG]; start 164 lib/find_bit.c tmp &= addr2[start / BITS_PER_LONG]; start 168 lib/find_bit.c tmp &= swab(BITMAP_FIRST_WORD_MASK(start)); start 169 lib/find_bit.c start = round_down(start, BITS_PER_LONG); start 172 lib/find_bit.c start += BITS_PER_LONG; start 173 lib/find_bit.c if (start >= nbits) start 176 lib/find_bit.c tmp = addr1[start / BITS_PER_LONG]; start 178 lib/find_bit.c tmp &= addr2[start / BITS_PER_LONG]; start 182 lib/find_bit.c return min(start + __ffs(swab(tmp)), nbits); start 84 lib/genalloc.c static int bitmap_set_ll(unsigned long *map, int start, int nr) start 86 lib/genalloc.c unsigned long *p = map + BIT_WORD(start); start 87 lib/genalloc.c const int size = start + nr; start 88 lib/genalloc.c int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); start 89 lib/genalloc.c unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); start 119 lib/genalloc.c static int bitmap_clear_ll(unsigned long *map, int start, int nr) start 121 lib/genalloc.c unsigned long *p = map + BIT_WORD(start); start 122 lib/genalloc.c const int size = start + nr; start 123 lib/genalloc.c int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); start 124 lib/genalloc.c unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); start 551 lib/genalloc.c bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, start 555 lib/genalloc.c unsigned long end = start + size - 1; start 560 lib/genalloc.c if (start >= chunk->start_addr && start <= chunk->end_addr) { start 644 lib/genalloc.c unsigned long start, unsigned int nr, void *data, start 647 lib/genalloc.c return bitmap_find_next_zero_area(map, size, start, nr, 0); start 662 lib/genalloc.c unsigned long start, unsigned int nr, void *data, start 674 lib/genalloc.c return bitmap_find_next_zero_area_off(map, size, start, nr, start 689 lib/genalloc.c unsigned long start, unsigned int nr, void *data, start 704 lib/genalloc.c start + offset_bit, nr, 0); start 723 lib/genalloc.c unsigned long size, unsigned long start, start 729 lib/genalloc.c return bitmap_find_next_zero_area(map, size, start, nr, align_mask); start 747 lib/genalloc.c unsigned long start, unsigned int nr, void *data, start 754 lib/genalloc.c index = bitmap_find_next_zero_area(map, size, start, nr, 0); start 79 lib/idr.c int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) start 81 lib/idr.c u32 id = start; start 84 lib/idr.c if (WARN_ON_ONCE(start < 0)) start 117 lib/idr.c int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) start 122 lib/idr.c if ((int)id < start) start 123 lib/idr.c id = start; start 126 lib/idr.c if ((err == -ENOSPC) && (id > start)) { start 127 lib/idr.c id = start; start 7 lib/interval_tree.c #define START(node) ((node)->start) start 30 lib/interval_tree_test.c search(struct rb_root_cached *root, unsigned long start, unsigned long last) start 35 lib/interval_tree_test.c for (node = interval_tree_iter_first(root, start, last); node; start 36 lib/interval_tree_test.c node = interval_tree_iter_next(node, start, last)) start 49 lib/interval_tree_test.c nodes[i].start = a; start 109 lib/interval_tree_test.c unsigned long start = search_all ? 
0 : queries[j]; start 112 lib/interval_tree_test.c results += search(&root, start, last); start 10 lib/iommu-helper.c unsigned long start, unsigned int nr, start 19 lib/iommu-helper.c index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); start 22 lib/iommu-helper.c start = ALIGN(shift + index, boundary_size) - shift; start 214 lib/ioremap.c unsigned long start; start 221 lib/ioremap.c start = addr; start 230 lib/ioremap.c flush_cache_vmap(start, end); start 1235 lib/iov_iter.c size_t *start) start 1238 lib/iov_iter.c ssize_t n = push_pipe(i, maxsize, &idx, start); start 1243 lib/iov_iter.c n += *start; start 1255 lib/iov_iter.c size_t *start) start 1267 lib/iov_iter.c data_start(i, &idx, start); start 1270 lib/iov_iter.c capacity = min(npages,maxpages) * PAGE_SIZE - *start; start 1272 lib/iov_iter.c return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start); start 1277 lib/iov_iter.c size_t *start) start 1283 lib/iov_iter.c return pipe_get_pages(i, pages, maxsize, maxpages, start); start 1289 lib/iov_iter.c size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); start 1302 lib/iov_iter.c return (res == n ? len : res * PAGE_SIZE) - *start; start 1305 lib/iov_iter.c *start = v.bv_offset; start 1323 lib/iov_iter.c size_t *start) start 1336 lib/iov_iter.c data_start(i, &idx, start); start 1339 lib/iov_iter.c n = npages * PAGE_SIZE - *start; start 1343 lib/iov_iter.c npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); start 1347 lib/iov_iter.c n = __pipe_get_pages(i, maxsize, p, idx, start); start 1357 lib/iov_iter.c size_t *start) start 1365 lib/iov_iter.c return pipe_get_pages_alloc(i, pages, maxsize, start); start 1371 lib/iov_iter.c size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); start 1387 lib/iov_iter.c return (res == n ? 
len : res * PAGE_SIZE) - *start; start 1390 lib/iov_iter.c *start = v.bv_offset; start 36 lib/logic_pio.c resource_size_t start; start 45 lib/logic_pio.c start = new_range->hw_start; start 57 lib/logic_pio.c if (start >= range->hw_start + range->size || start 61 lib/lz4/lz4hc_compress.c static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start) start 66 lib/lz4/lz4hc_compress.c hc4->base = start - 64 * KB; start 67 lib/lz4/lz4hc_compress.c hc4->end = start; start 68 lib/lz4/lz4hc_compress.c hc4->dictBase = start - 64 * KB; start 92 lib/math/prime_numbers.c unsigned long start, start 98 lib/math/prime_numbers.c if (m < start) start 99 lib/math/prime_numbers.c m = roundup(start, x); start 812 lib/nlattr.c void *start; start 814 lib/nlattr.c start = __nla_reserve_nohdr(skb, attrlen); start 815 lib/nlattr.c memcpy(start, data, attrlen); start 33 lib/pci_iomap.c resource_size_t start = pci_resource_start(dev, bar); start 37 lib/pci_iomap.c if (len <= offset || !start) start 40 lib/pci_iomap.c start += offset; start 44 lib/pci_iomap.c return __pci_ioport_map(dev, start, len); start 46 lib/pci_iomap.c return ioremap(start, len); start 73 lib/pci_iomap.c resource_size_t start = pci_resource_start(dev, bar); start 81 lib/pci_iomap.c if (len <= offset || !start) start 85 lib/pci_iomap.c start += offset; start 90 lib/pci_iomap.c return ioremap_wc(start, len); start 1491 lib/radix-tree.c unsigned long maxindex, start = iter->next_index; start 1497 lib/radix-tree.c start = max(start, maxindex + 1); start 1498 lib/radix-tree.c if (start > max) start 1501 lib/radix-tree.c if (start > maxindex) { start 1502 lib/radix-tree.c int error = radix_tree_extend(root, gfp, start, shift); start 1508 lib/radix-tree.c if (start == 0 && shift == 0) start 1527 lib/radix-tree.c offset = radix_tree_descend(node, &child, start); start 1531 lib/radix-tree.c start = next_index(start, node, offset); start 1532 lib/radix-tree.c if (start > max || start == 0) start 1546 lib/radix-tree.c iter->index = start; start 1548 lib/radix-tree.c iter->next_index = 1 + min(max, (start | node_maxindex(node))); start 152 lib/raid6/algos.c int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */ start 197 lib/raid6/algos.c (*algo)->xor_syndrome(disks, start, stop, start 84 lib/raid6/avx2.c static void raid6_avx21_xor_syndrome(int disks, int start, int stop, start 104 lib/raid6/avx2.c for (z = z0-1 ; z >= start ; z--) { start 115 lib/raid6/avx2.c for (z = start-1 ; z >= 0 ; z--) { start 194 lib/raid6/avx2.c static void raid6_avx22_xor_syndrome(int disks, int start, int stop, start 217 lib/raid6/avx2.c for (z = z0-1 ; z >= start ; z--) { start 237 lib/raid6/avx2.c for (z = start-1 ; z >= 0 ; z--) { start 355 lib/raid6/avx2.c static void raid6_avx24_xor_syndrome(int disks, int start, int stop, start 384 lib/raid6/avx2.c for (z = z0-1 ; z >= start ; z--) { start 426 lib/raid6/avx2.c for (z = start-1 ; z >= 0 ; z--) { start 101 lib/raid6/avx512.c static void raid6_avx5121_xor_syndrome(int disks, int start, int stop, start 124 lib/raid6/avx512.c for (z = z0-1 ; z >= start ; z--) { start 138 lib/raid6/avx512.c for (z = start-1 ; z >= 0 ; z--) { start 233 lib/raid6/avx512.c static void raid6_avx5122_xor_syndrome(int disks, int start, int stop, start 260 lib/raid6/avx512.c for (z = z0-1 ; z >= start ; z--) { start 283 lib/raid6/avx512.c for (z = start-1 ; z >= 0 ; z--) { start 423 lib/raid6/avx512.c static void raid6_avx5124_xor_syndrome(int disks, int start, int stop, start 458 lib/raid6/avx512.c for (z = z0-1 ; 
z >= start ; z--) { start 507 lib/raid6/avx512.c for (z = start-1 ; z >= 0 ; z--) { start 41 lib/raid6/neon.c int start, int stop, \ start 48 lib/raid6/neon.c start, stop, (unsigned long)bytes, ptrs); \ start 87 lib/raid6/sse2.c static void raid6_sse21_xor_syndrome(int disks, int start, int stop, start 107 lib/raid6/sse2.c for ( z = z0-1 ; z >= start ; z-- ) { start 118 lib/raid6/sse2.c for ( z = start-1 ; z >= 0 ; z-- ) { start 198 lib/raid6/sse2.c static void raid6_sse22_xor_syndrome(int disks, int start, int stop, start 221 lib/raid6/sse2.c for ( z = z0-1 ; z >= start ; z-- ) { start 240 lib/raid6/sse2.c for ( z = start-1 ; z >= 0 ; z-- ) { start 364 lib/raid6/sse2.c static void raid6_sse24_xor_syndrome(int disks, int start, int stop, start 393 lib/raid6/sse2.c for ( z = z0-1 ; z >= start ; z-- ) { start 432 lib/raid6/sse2.c for ( z = start-1 ; z >= 0 ; z-- ) { start 29 lib/raid6/test/test.c static void makedata(int start, int stop) start 33 lib/raid6/test/test.c for (i = start; i <= stop; i++) { start 1010 lib/string.c static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes) start 1013 lib/string.c if (*start != value) start 1014 lib/string.c return (void *)start; start 1015 lib/string.c start++; start 1030 lib/string.c void *memchr_inv(const void *start, int c, size_t bytes) start 1037 lib/string.c return check_bytes8(start, value, bytes); start 1051 lib/string.c prefix = (unsigned long)start % 8; start 1056 lib/string.c r = check_bytes8(start, value, prefix); start 1059 lib/string.c start += prefix; start 1066 lib/string.c if (*(u64 *)start != value64) start 1067 lib/string.c return check_bytes8(start, value, 8); start 1068 lib/string.c start += 8; start 1072 lib/string.c return check_bytes8(start, value, bytes % 8); start 374 lib/test_bitmap.c unsigned int start, nbits; start 376 lib/test_bitmap.c for (start = 0; start < 1024; start += 8) { start 377 lib/test_bitmap.c for (nbits = 0; nbits < 1024 - start; nbits += 8) { start 381 lib/test_bitmap.c bitmap_set(bmap1, start, nbits); start 382 lib/test_bitmap.c __bitmap_set(bmap2, start, nbits); start 384 lib/test_bitmap.c printk("set not equal %d %d\n", start, nbits); start 388 lib/test_bitmap.c printk("set not __equal %d %d\n", start, nbits); start 392 lib/test_bitmap.c bitmap_clear(bmap1, start, nbits); start 393 lib/test_bitmap.c __bitmap_clear(bmap2, start, nbits); start 395 lib/test_bitmap.c printk("clear not equal %d %d\n", start, nbits); start 399 lib/test_bitmap.c printk("clear not __equal %d %d\n", start, start 6660 lib/test_bpf.c u64 start, finish; start 6664 lib/test_bpf.c start = ktime_get_ns(); start 6672 lib/test_bpf.c *duration = finish - start; start 214 lib/test_rhashtable.c s64 start, end; start 221 lib/test_rhashtable.c start = ktime_get_ns(); start 261 lib/test_rhashtable.c pr_info(" Duration of test: %lld ns\n", end - start); start 263 lib/test_rhashtable.c return end - start; start 50 lib/test_user_copy.c size_t start, end, i, zero_start, zero_end; start 62 lib/test_user_copy.c start = PAGE_SIZE - (size / 2); start 64 lib/test_user_copy.c kmem += start; start 65 lib/test_user_copy.c umem += start; start 90 lib/test_user_copy.c for (start = 0; start <= size; start++) { start 91 lib/test_user_copy.c for (end = start; end <= size; end++) { start 92 lib/test_user_copy.c size_t len = end - start; start 93 lib/test_user_copy.c int retval = check_zeroed_user(umem + start, len); start 94 lib/test_user_copy.c int expected = is_zeroed(kmem + start, len); start 98 lib/test_user_copy.c retval, expected, start, 
end); start 360 lib/test_vmalloc.c unsigned long start; start 405 lib/test_vmalloc.c t->start = get_cycles(); start 530 lib/test_vmalloc.c cpu, t->stop - t->start); start 842 lib/test_xarray.c static noinline void __check_store_iter(struct xarray *xa, unsigned long start, start 845 lib/test_xarray.c XA_STATE_ORDER(xas, xa, start, order); start 853 lib/test_xarray.c XA_BUG_ON(xa, entry < xa_mk_index(start)); start 854 lib/test_xarray.c XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); start 857 lib/test_xarray.c xas_store(&xas, xa_mk_index(start)); start 865 lib/test_xarray.c XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); start 866 lib/test_xarray.c XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != start 867 lib/test_xarray.c xa_mk_index(start)); start 868 lib/test_xarray.c xa_erase_index(xa, start); start 1045 lib/vsprintf.c p = number(p, pend, res->start, *specp); start 1046 lib/vsprintf.c if (res->start != res->end) { start 2227 lib/vsprintf.c const char *start = fmt; start 2258 lib/vsprintf.c if (fmt != start || !*fmt) start 2259 lib/vsprintf.c return fmt - start; start 2290 lib/vsprintf.c return ++fmt - start; start 2305 lib/vsprintf.c return ++fmt - start; start 2331 lib/vsprintf.c return ++fmt - start; start 2335 lib/vsprintf.c return ++fmt - start; start 2339 lib/vsprintf.c return ++fmt - start; start 2343 lib/vsprintf.c return ++fmt - start; start 2375 lib/vsprintf.c return fmt - start; start 2398 lib/vsprintf.c return ++fmt - start; start 1962 lib/xarray.c unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, start 1965 lib/xarray.c XA_STATE(xas, xa, start); start 49 lib/xz/xz_dec_lzma2.c size_t start; start 293 lib/xz/xz_dec_lzma2.c dict->start = 0; start 404 lib/xz/xz_dec_lzma2.c dict->start = dict->pos; start 418 lib/xz/xz_dec_lzma2.c size_t copy_size = dict->pos - dict->start; start 424 lib/xz/xz_dec_lzma2.c memcpy(b->out + b->out_pos, dict->buf + dict->start, start 428 lib/xz/xz_dec_lzma2.c dict->start = dict->pos; start 80 lib/zlib_deflate/deflate.c static void check_match (deflate_state *s, IPos start, IPos match, start 699 lib/zlib_deflate/deflate.c IPos start, start 706 lib/zlib_deflate/deflate.c (char *)s->window + start, length) != EQUAL) { start 708 lib/zlib_deflate/deflate.c start, match, length); start 710 lib/zlib_deflate/deflate.c fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); start 715 lib/zlib_deflate/deflate.c fprintf(stderr,"\\[%d,%d]", start-match, length); start 716 lib/zlib_deflate/deflate.c do { putc(s->window[start++], stderr); } while (--length != 0); start 720 lib/zlib_deflate/deflate.c # define check_match(s, start, match, length) start 88 lib/zlib_inflate/inffast.c void inflate_fast(z_streamp strm, unsigned start) start 121 lib/zlib_inflate/inffast.c beg = out - (start - strm->avail_out); start 11 lib/zlib_inflate/inffast.h void inflate_fast (z_streamp strm, unsigned start); start 107 lib/zstd/bitstream.h const char *start; start 247 lib/zstd/bitstream.h bitD->start = (const char *)srcBuffer; start 257 lib/zstd/bitstream.h bitD->start = (const char *)srcBuffer; start 258 lib/zstd/bitstream.h bitD->ptr = bitD->start; start 259 lib/zstd/bitstream.h bitD->bitContainer = *(const BYTE *)(bitD->start); start 286 lib/zstd/bitstream.h ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; } start 288 lib/zstd/bitstream.h ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> 
start) & BIT_mask[nbBits]; } start 346 lib/zstd/bitstream.h if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { start 352 lib/zstd/bitstream.h if (bitD->ptr == bitD->start) { start 360 lib/zstd/bitstream.h if (bitD->ptr - nbBytes < bitD->start) { start 361 lib/zstd/bitstream.h nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ start 376 lib/zstd/bitstream.h return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8)); start 1929 lib/zstd/compress.c const BYTE *start = ip + 1; start 1944 lib/zstd/compress.c matchLength = ml2, start = ip, offset = offsetFound; start 1961 lib/zstd/compress.c matchLength = mlRep, offset = 0, start = ip; start 1969 lib/zstd/compress.c matchLength = ml2, offset = offset2, start = ip; start 1982 lib/zstd/compress.c matchLength = ml2, offset = 0, start = ip; start 1990 lib/zstd/compress.c matchLength = ml2, offset = offset2, start = ip; start 2005 lib/zstd/compress.c while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) && start 2006 lib/zstd/compress.c (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */ start 2008 lib/zstd/compress.c start--; start 2018 lib/zstd/compress.c size_t const litLength = start - anchor; start 2020 lib/zstd/compress.c anchor = ip = start + matchLength; start 2090 lib/zstd/compress.c const BYTE *start = ip + 1; start 2114 lib/zstd/compress.c matchLength = ml2, start = ip, offset = offsetFound; start 2142 lib/zstd/compress.c matchLength = repLength, offset = 0, start = ip; start 2153 lib/zstd/compress.c matchLength = ml2, offset = offset2, start = ip; start 2177 lib/zstd/compress.c matchLength = repLength, offset = 0, start = ip; start 2188 lib/zstd/compress.c matchLength = ml2, offset = offset2, start = ip; start 2198 lib/zstd/compress.c U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE)); start 2201 lib/zstd/compress.c while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) { start 2202 lib/zstd/compress.c start--; start 2212 lib/zstd/compress.c size_t const litLength = start - anchor; start 2214 lib/zstd/compress.c anchor = ip = start + matchLength; start 231 lib/zstd/fse_compress.c unsigned start = charnum; start 234 lib/zstd/fse_compress.c while (charnum >= start + 24) { start 235 lib/zstd/fse_compress.c start += 24; start 244 lib/zstd/fse_compress.c while (charnum >= start + 3) { start 245 lib/zstd/fse_compress.c start += 3; start 249 lib/zstd/fse_compress.c bitStream += (charnum - start) << bitCount; start 421 lib/zstd/huf_decompress.c const U32 start = rankVal[weight]; start 422 lib/zstd/huf_decompress.c U32 i = start; start 423 lib/zstd/huf_decompress.c const U32 end = start + length; start 455 lib/zstd/huf_decompress.c const U32 start = rankVal[weight]; start 464 lib/zstd/huf_decompress.c HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank, start 472 lib/zstd/huf_decompress.c U32 const end = start + length; start 474 lib/zstd/huf_decompress.c for (u = start; u < end; u++) start 1097 mm/backing-dev.c unsigned long start = jiffies; start 1106 mm/backing-dev.c jiffies_to_usecs(jiffies - start)); start 1128 mm/backing-dev.c unsigned long start = jiffies; start 1140 mm/backing-dev.c ret = timeout - (jiffies - start); start 1154 mm/backing-dev.c jiffies_to_usecs(jiffies - start)); start 382 mm/cma.c unsigned long start = 0; start 389 mm/cma.c next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); start 398 
mm/cma.c start = next_zero_bit + nr_zero; start 422 mm/cma.c unsigned long start = 0; start 448 mm/cma.c bitmap_maxno, start, bitmap_count, mask, start 479 mm/cma.c start = bitmap_no + mask + 1; start 53 mm/cma_debug.c unsigned long start, end = 0; start 58 mm/cma_debug.c start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); start 59 mm/cma_debug.c if (start >= bitmap_maxno) start 61 mm/cma_debug.c end = find_next_bit(cma->bitmap, bitmap_maxno, start); start 62 mm/cma_debug.c maxchunk = max(end - start, maxchunk); start 405 mm/filemap.c int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, start 412 mm/filemap.c .range_start = start, start 438 mm/filemap.c int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, start 441 mm/filemap.c return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); start 1725 mm/filemap.c pgoff_t start, unsigned int nr_entries, start 1728 mm/filemap.c XA_STATE(xas, &mapping->i_pages, start); start 1791 mm/filemap.c unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, start 1795 mm/filemap.c XA_STATE(xas, &mapping->i_pages, *start); start 1819 mm/filemap.c *start = xas.xa_index + 1; start 1836 mm/filemap.c *start = (pgoff_t)-1; start 1838 mm/filemap.c *start = end + 1; start 2419 mm/filemap.c ra->start = max_t(long, 0, offset - ra->ra_pages / 2); start 34 mm/frame_vector.c int get_vaddr_frames(unsigned long start, unsigned int nr_frames, start 49 mm/frame_vector.c start = untagged_addr(start); start 53 mm/frame_vector.c vma = find_vma_intersection(mm, start, start + 1); start 75 mm/frame_vector.c ret = get_user_pages_locked(start, nr_frames, start 85 mm/frame_vector.c while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { start 86 mm/frame_vector.c err = follow_pfn(vma, start, &nums[ret]); start 92 mm/frame_vector.c start += PAGE_SIZE; start 99 mm/frame_vector.c if (ret >= nr_frames || start < vma->vm_end) start 101 mm/frame_vector.c vma = find_vma_intersection(mm, start, start + 1); start 789 mm/gup.c unsigned long start, unsigned long nr_pages, start 800 mm/gup.c start = untagged_addr(start); start 818 mm/gup.c if (!vma || start >= vma->vm_end) { start 819 mm/gup.c vma = find_extend_vma(mm, start); start 820 mm/gup.c if (!vma && in_gate_area(mm, start)) { start 821 mm/gup.c ret = get_gate_page(mm, start & PAGE_MASK, start 838 mm/gup.c &start, &nr_pages, i, start 858 mm/gup.c page = follow_page_mask(vma, start, foll_flags, &ctx); start 860 mm/gup.c ret = faultin_page(tsk, vma, start, &foll_flags, start 888 mm/gup.c flush_anon_page(vma, page, start); start 897 mm/gup.c page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); start 901 mm/gup.c start += page_increm * PAGE_SIZE; start 1015 mm/gup.c unsigned long start, start 1038 mm/gup.c ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, start 1071 mm/gup.c start += ret << PAGE_SHIFT; start 1081 mm/gup.c ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, start 1095 mm/gup.c start += PAGE_SIZE; start 1165 mm/gup.c unsigned long start, unsigned long nr_pages, start 1178 mm/gup.c return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, start 1204 mm/gup.c unsigned long start, unsigned long end, int *nonblocking) start 1207 mm/gup.c unsigned long nr_pages = (end - start) / PAGE_SIZE; start 1210 mm/gup.c VM_BUG_ON(start & ~PAGE_MASK); start 1212 mm/gup.c VM_BUG_ON_VMA(start < vma->vm_start, vma); start 1238 mm/gup.c return __get_user_pages(current, mm, start, nr_pages, gup_flags, start 1249 
mm/gup.c int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) start 1257 mm/gup.c end = start + len; start 1259 mm/gup.c for (nstart = start; nstart < end; nstart = nend) { start 1332 mm/gup.c struct mm_struct *mm, unsigned long start, start 1350 mm/gup.c vma = find_vma(mm, start); start 1360 mm/gup.c pages[i] = virt_to_page(start); start 1366 mm/gup.c start = (start + PAGE_SIZE) & PAGE_MASK; start 1450 mm/gup.c unsigned long start, start 1522 mm/gup.c nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages, start 1537 mm/gup.c unsigned long start, start 1553 mm/gup.c unsigned long start, start 1577 mm/gup.c rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, start 1592 mm/gup.c rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, start 1604 mm/gup.c unsigned long start, start 1610 mm/gup.c return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, start 1622 mm/gup.c long get_user_pages(unsigned long start, unsigned long nr_pages, start 1626 mm/gup.c return __gup_longterm_locked(current, current->mm, start, nr_pages, start 1652 mm/gup.c long get_user_pages_locked(unsigned long start, unsigned long nr_pages, start 1665 mm/gup.c return __get_user_pages_locked(current, current->mm, start, nr_pages, start 1686 mm/gup.c long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, start 1703 mm/gup.c ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, start 2320 mm/gup.c static bool gup_fast_permitted(unsigned long start, unsigned long end) start 2339 mm/gup.c int __get_user_pages_fast(unsigned long start, int nr_pages, int write, start 2346 mm/gup.c start = untagged_addr(start) & PAGE_MASK; start 2348 mm/gup.c end = start + len; start 2350 mm/gup.c if (end <= start) start 2352 mm/gup.c if (unlikely(!access_ok((void __user *)start, len))) start 2374 mm/gup.c gup_fast_permitted(start, end)) { start 2376 mm/gup.c gup_pgd_range(start, end, write ? 
FOLL_WRITE : 0, pages, &nr); start 2384 mm/gup.c static int __gup_longterm_unlocked(unsigned long start, int nr_pages, start 2396 mm/gup.c start, nr_pages, start 2400 mm/gup.c ret = get_user_pages_unlocked(start, nr_pages, start 2423 mm/gup.c int get_user_pages_fast(unsigned long start, int nr_pages, start 2433 mm/gup.c start = untagged_addr(start) & PAGE_MASK; start 2434 mm/gup.c addr = start; start 2436 mm/gup.c end = start + len; start 2438 mm/gup.c if (end <= start) start 2440 mm/gup.c if (unlikely(!access_ok((void __user *)start, len))) start 2451 mm/gup.c gup_fast_permitted(start, end)) { start 2460 mm/gup.c start += nr << PAGE_SHIFT; start 2463 mm/gup.c ret = __gup_longterm_unlocked(start, nr_pages - nr, start 220 mm/highmem.c start: start 254 mm/highmem.c goto start; start 109 mm/hmm.c if (nrange->end < range->start || nrange->start >= range->end) start 264 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 293 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 387 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 553 mm/hmm.c unsigned long start, start 560 mm/hmm.c unsigned long addr = start, i; start 567 mm/hmm.c return hmm_vma_walk_hole(start, end, walk); start 574 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 587 mm/hmm.c return hmm_pfns_bad(start, end, walk); start 604 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 615 mm/hmm.c return hmm_pfns_bad(start, end, walk); start 618 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 656 mm/hmm.c static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end, start 661 mm/hmm.c unsigned long addr = start, next; start 669 mm/hmm.c return hmm_vma_walk_hole(start, end, walk); start 677 mm/hmm.c return hmm_vma_walk_hole(start, end, walk); start 679 mm/hmm.c i = (addr - range->start) >> PAGE_SHIFT; start 727 mm/hmm.c unsigned long start, unsigned long end, start 730 mm/hmm.c unsigned long addr = start, i, pfn; start 743 mm/hmm.c i = (start - range->start) >> PAGE_SHIFT; start 755 mm/hmm.c pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); start 799 mm/hmm.c if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1))) start 801 mm/hmm.c if (range->start >= range->end) start 893 mm/hmm.c unsigned long start = range->start, end; start 906 mm/hmm.c vma = find_vma(hmm->mmu_notifier.mm, start); start 917 mm/hmm.c range->start, range->end); start 922 mm/hmm.c hmm_vma_walk.last = start; start 927 mm/hmm.c walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops, start 931 mm/hmm.c ret = walk_page_range(vma->vm_mm, start, end, start 933 mm/hmm.c start = hmm_vma_walk.last; start 941 mm/hmm.c i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; start 946 mm/hmm.c start = end; start 948 mm/hmm.c } while (start < range->end); start 950 mm/hmm.c return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; start 974 mm/hmm.c npages = (range->end - range->start) >> PAGE_SHIFT; start 1060 mm/hmm.c if (range->end <= range->start) start 1067 mm/hmm.c npages = (range->end - range->start) >> PAGE_SHIFT; start 2093 mm/huge_memory.c __split_huge_pud_locked(vma, pud, range.start); start 2325 mm/huge_memory.c __split_huge_pmd_locked(vma, pmd, range.start, freeze); start 2370 mm/huge_memory.c unsigned long start, start 2379 mm/huge_memory.c if (start & ~HPAGE_PMD_MASK && start 2380 mm/huge_memory.c (start & HPAGE_PMD_MASK) >= vma->vm_start && start 2381 mm/huge_memory.c (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) start 2382 mm/huge_memory.c split_huge_pmd_address(vma, start, false, 
NULL); start 3300 mm/hugetlb.c unsigned long reserve, start, end; start 3306 mm/hugetlb.c start = vma_hugecache_offset(h, vma, vma->vm_start); start 3309 mm/hugetlb.c reserve = (end - start) - region_count(resv, start, end); start 3520 mm/hugetlb.c unsigned long start, unsigned long end, start 3534 mm/hugetlb.c BUG_ON(start & ~huge_page_mask(h)); start 3547 mm/hugetlb.c mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, start 3549 mm/hugetlb.c adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); start 3551 mm/hugetlb.c address = start; start 3623 mm/hugetlb.c struct vm_area_struct *vma, unsigned long start, start 3626 mm/hugetlb.c __unmap_hugepage_range(tlb, vma, start, end, ref_page); start 3641 mm/hugetlb.c void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, start 3646 mm/hugetlb.c unsigned long tlb_start = start; start 3661 mm/hugetlb.c __unmap_hugepage_range(&tlb, vma, start, end, ref_page); start 3830 mm/hugetlb.c mmu_notifier_invalidate_range(mm, range.start, range.end); start 4557 mm/hugetlb.c unsigned long start = address; start 4571 mm/hugetlb.c 0, vma, mm, start, end); start 4572 mm/hugetlb.c adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); start 4575 mm/hugetlb.c flush_cache_range(vma, range.start, range.end); start 4630 mm/hugetlb.c flush_hugetlb_tlb_range(vma, range.start, range.end); start 4632 mm/hugetlb.c flush_hugetlb_tlb_range(vma, start, end); start 4764 mm/hugetlb.c long hugetlb_unreserve_pages(struct inode *inode, long start, long end, start 4778 mm/hugetlb.c chg = region_del(resv_map, start, end); start 4847 mm/hugetlb.c unsigned long *start, unsigned long *end) start 4849 mm/hugetlb.c unsigned long check_addr = *start; start 4854 mm/hugetlb.c for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { start 4862 mm/hugetlb.c if (a_start < *start) start 4863 mm/hugetlb.c *start = a_start; start 4969 mm/hugetlb.c unsigned long *start, unsigned long *end) start 63 mm/internal.h ra->start, ra->size, ra->async_size); start 297 mm/internal.h unsigned long start, unsigned long end, int *nonblocking); start 299 mm/internal.h unsigned long start, unsigned long end); start 354 mm/internal.h unsigned long start, end; start 356 mm/internal.h start = __vma_address(page, vma); start 357 mm/internal.h end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); start 360 mm/internal.h VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma); start 362 mm/internal.h return max(start, vma->vm_start); start 87 mm/kasan/generic.c static __always_inline unsigned long bytes_is_nonzero(const u8 *start, start 91 mm/kasan/generic.c if (unlikely(*start)) start 92 mm/kasan/generic.c return (unsigned long)start; start 93 mm/kasan/generic.c start++; start 100 mm/kasan/generic.c static __always_inline unsigned long memory_is_nonzero(const void *start, start 105 mm/kasan/generic.c unsigned int prefix = (unsigned long)start % 8; start 107 mm/kasan/generic.c if (end - start <= 16) start 108 mm/kasan/generic.c return bytes_is_nonzero(start, end - start); start 112 mm/kasan/generic.c ret = bytes_is_nonzero(start, prefix); start 115 mm/kasan/generic.c start += prefix; start 118 mm/kasan/generic.c words = (end - start) / 8; start 120 mm/kasan/generic.c if (unlikely(*(u64 *)start)) start 121 mm/kasan/generic.c return bytes_is_nonzero(start, 8); start 122 mm/kasan/generic.c start += 8; start 126 mm/kasan/generic.c return bytes_is_nonzero(start, (end - start) % 8); start 452 mm/kasan/init.c void 
kasan_remove_zero_shadow(void *start, unsigned long size) start 457 mm/kasan/init.c addr = (unsigned long)kasan_mem_to_shadow(start); start 460 mm/kasan/init.c if (WARN_ON((unsigned long)start % start 487 mm/kasan/init.c int kasan_add_zero_shadow(void *start, unsigned long size) start 492 mm/kasan/init.c shadow_start = kasan_mem_to_shadow(start); start 495 mm/kasan/init.c if (WARN_ON((unsigned long)start % start 1493 mm/khugepaged.c struct file *file, pgoff_t start, start 1500 mm/khugepaged.c pgoff_t index, end = start + HPAGE_PMD_NR; start 1502 mm/khugepaged.c XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); start 1507 mm/khugepaged.c VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); start 1540 mm/khugepaged.c new_page->index = start; start 1549 mm/khugepaged.c xas_set(&xas, start); start 1550 mm/khugepaged.c for (index = start; index < end; index++) { start 1561 mm/khugepaged.c if (index == start) { start 1726 mm/khugepaged.c index = start; start 1763 mm/khugepaged.c retract_page_tables(mapping, start); start 1777 mm/khugepaged.c xas_set(&xas, start); start 1816 mm/khugepaged.c struct file *file, pgoff_t start, struct page **hpage) start 1820 mm/khugepaged.c XA_STATE(xas, &mapping->i_pages, start); start 1829 mm/khugepaged.c xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { start 1884 mm/khugepaged.c collapse_file(mm, file, start, hpage, node); start 1892 mm/khugepaged.c struct file *file, pgoff_t start, struct page **hpage) start 122 mm/kmemleak.c unsigned long start; start 703 mm/kmemleak.c unsigned long start, end; start 719 mm/kmemleak.c start = object->pointer; start 721 mm/kmemleak.c if (ptr > start) start 722 mm/kmemleak.c create_object(start, ptr - start, object->min_count, start 818 mm/kmemleak.c area->start = ptr; start 1231 mm/kmemleak.c unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); start 1237 mm/kmemleak.c for (ptr = start; ptr < end; ptr++) { start 1301 mm/kmemleak.c static void scan_large_block(void *start, void *end) start 1305 mm/kmemleak.c while (start < end) { start 1306 mm/kmemleak.c next = min(start + MAX_SCAN_SIZE, end); start 1307 mm/kmemleak.c scan_block(start, next, NULL); start 1308 mm/kmemleak.c start = next; start 1335 mm/kmemleak.c void *start = (void *)object->pointer; start 1340 mm/kmemleak.c next = min(start + MAX_SCAN_SIZE, end); start 1341 mm/kmemleak.c scan_block(start, next, object); start 1343 mm/kmemleak.c start = next; start 1344 mm/kmemleak.c if (start >= end) start 1353 mm/kmemleak.c scan_block((void *)area->start, start 1354 mm/kmemleak.c (void *)(area->start + area->size), start 1692 mm/kmemleak.c .start = kmemleak_seq_start, start 844 mm/ksm.c unsigned long start, unsigned long end) start 849 mm/ksm.c for (addr = start; addr < end && !err; addr += PAGE_SIZE) { start 2436 mm/ksm.c int ksm_madvise(struct vm_area_struct *vma, unsigned long start, start 2478 mm/ksm.c err = unmerge_ksm_pages(vma, start, end); start 67 mm/madvise.c unsigned long start, unsigned long end, int behavior) start 117 mm/madvise.c error = ksm_madvise(vma, start, end, behavior, &new_flags); start 134 mm/madvise.c pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); start 135 mm/madvise.c *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, start 145 mm/madvise.c if (start != vma->vm_start) { start 150 mm/madvise.c error = __split_vma(mm, vma, start, 1); start 183 mm/madvise.c static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, start 193 mm/madvise.c for (index = start; index != end; index += PAGE_SIZE) { start 199 
mm/madvise.c orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); start 200 mm/madvise.c pte = *(orig_pte + ((index - start) / PAGE_SIZE)); start 223 mm/madvise.c unsigned long start, unsigned long end, start 230 mm/madvise.c for (; start < end; start += PAGE_SIZE) { start 231 mm/madvise.c index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; start 255 mm/madvise.c unsigned long start, unsigned long end) start 263 mm/madvise.c walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); start 269 mm/madvise.c force_shm_swapin_readahead(vma, start, end, start 292 mm/madvise.c offset = (loff_t)(start - vma->vm_start) start 294 mm/madvise.c vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); start 710 mm/madvise.c range.start = max(vma->vm_start, start_addr); start 711 mm/madvise.c if (range.start >= vma->vm_end) start 717 mm/madvise.c range.start, range.end); start 720 mm/madvise.c tlb_gather_mmu(&tlb, mm, range.start, range.end); start 725 mm/madvise.c walk_page_range(vma->vm_mm, range.start, range.end, start 729 mm/madvise.c tlb_finish_mmu(&tlb, range.start, range.end); start 754 mm/madvise.c unsigned long start, unsigned long end) start 756 mm/madvise.c zap_page_range(vma, start, end - start); start 762 mm/madvise.c unsigned long start, unsigned long end, start 769 mm/madvise.c if (!userfaultfd_remove(vma, start, end)) { start 773 mm/madvise.c vma = find_vma(current->mm, start); start 776 mm/madvise.c if (start < vma->vm_start) { start 805 mm/madvise.c VM_WARN_ON(start >= end); start 809 mm/madvise.c return madvise_dontneed_single_vma(vma, start, end); start 811 mm/madvise.c return madvise_free_single_vma(vma, start, end); start 822 mm/madvise.c unsigned long start, unsigned long end) start 842 mm/madvise.c offset = (loff_t)(start - vma->vm_start) start 852 mm/madvise.c if (userfaultfd_remove(vma, start, end)) { start 858 mm/madvise.c offset, end - start); start 869 mm/madvise.c unsigned long start, unsigned long end) start 879 mm/madvise.c for (; start < end; start += PAGE_SIZE << order) { start 883 mm/madvise.c ret = get_user_pages_fast(start, 1, 0, &page); start 902 mm/madvise.c pfn, start); start 911 mm/madvise.c pfn, start); start 935 mm/madvise.c unsigned long start, unsigned long end, int behavior) start 939 mm/madvise.c return madvise_remove(vma, prev, start, end); start 941 mm/madvise.c return madvise_willneed(vma, prev, start, end); start 943 mm/madvise.c return madvise_cold(vma, prev, start, end); start 945 mm/madvise.c return madvise_pageout(vma, prev, start, end); start 948 mm/madvise.c return madvise_dontneed_free(vma, prev, start, end, behavior); start 950 mm/madvise.c return madvise_behavior(vma, prev, start, end, behavior); start 1053 mm/madvise.c SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) start 1063 mm/madvise.c start = untagged_addr(start); start 1068 mm/madvise.c if (start & ~PAGE_MASK) start 1076 mm/madvise.c end = start + len; start 1077 mm/madvise.c if (end < start) start 1081 mm/madvise.c if (end == start) start 1086 mm/madvise.c return madvise_inject_error(behavior, start, start + len_in); start 1102 mm/madvise.c vma = find_vma_prev(current->mm, start, &prev); start 1103 mm/madvise.c if (vma && start > vma->vm_start) start 1114 mm/madvise.c if (start < vma->vm_start) { start 1116 mm/madvise.c start = vma->vm_start; start 1117 mm/madvise.c if (start >= end) start 1127 mm/madvise.c error = madvise_vma(vma, &prev, start, tmp, behavior); start 1130 mm/madvise.c start = tmp; start 1131 mm/madvise.c if 
(prev && start < prev->vm_end) start 1132 mm/madvise.c start = prev->vm_end; start 1134 mm/madvise.c if (start >= end) start 1139 mm/madvise.c vma = find_vma(current->mm, start); start 190 mm/memblock.c __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, start 198 mm/memblock.c this_start = clamp(this_start, start, end); start 199 mm/memblock.c this_end = clamp(this_end, start, end); start 225 mm/memblock.c __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, start 234 mm/memblock.c this_start = clamp(this_start, start, end); start 235 mm/memblock.c this_end = clamp(this_end, start, end); start 272 mm/memblock.c phys_addr_t align, phys_addr_t start, start 284 mm/memblock.c start = max_t(phys_addr_t, start, PAGE_SIZE); start 285 mm/memblock.c end = max(start, end); start 296 mm/memblock.c bottom_up_start = max(start, kernel_end); start 318 mm/memblock.c return __memblock_find_range_top_down(start, end, size, align, nid, start 335 mm/memblock.c phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, start 343 mm/memblock.c ret = memblock_find_in_range_node(size, align, start, end, start 1344 mm/memblock.c phys_addr_t align, phys_addr_t start, start 1360 mm/memblock.c found = memblock_find_in_range_node(size, align, start, end, nid, start 1366 mm/memblock.c found = memblock_find_in_range_node(size, align, start, start 1410 mm/memblock.c phys_addr_t start, start 1413 mm/memblock.c return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE); start 1806 mm/memblock.c phys_addr_t start, end, orig_start, orig_end; start 1812 mm/memblock.c start = round_up(orig_start, align); start 1815 mm/memblock.c if (start == orig_start && end == orig_end) start 1818 mm/memblock.c if (start < end) { start 1819 mm/memblock.c r->base = start; start 1820 mm/memblock.c r->size = end - start; start 1892 mm/memblock.c static void __init __free_pages_memory(unsigned long start, unsigned long end) start 1896 mm/memblock.c while (start < end) { start 1897 mm/memblock.c order = min(MAX_ORDER - 1UL, __ffs(start)); start 1899 mm/memblock.c while (start + (1UL << order) > end) start 1902 mm/memblock.c memblock_free_pages(pfn_to_page(start), start, order); start 1904 mm/memblock.c start += (1UL << order); start 1908 mm/memblock.c static unsigned long __init __free_memory_core(phys_addr_t start, start 1911 mm/memblock.c unsigned long start_pfn = PFN_UP(start); start 1926 mm/memblock.c phys_addr_t start, end; start 1931 mm/memblock.c for_each_reserved_mem_region(i, &start, &end) start 1932 mm/memblock.c reserve_bootmem_region(start, end); start 1939 mm/memblock.c for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, start 1941 mm/memblock.c count += __free_memory_core(start, end); start 1165 mm/memory-failure.c loff_t start; start 1217 mm/memory-failure.c start = (page->index << PAGE_SHIFT) & ~(size - 1); start 1218 mm/memory-failure.c unmap_mapping_range(page->mapping, start, start + size, 0); start 210 mm/memory.c unsigned long start; start 212 mm/memory.c start = addr; start 221 mm/memory.c start &= PUD_MASK; start 222 mm/memory.c if (start < floor) start 232 mm/memory.c pmd = pmd_offset(pud, start); start 234 mm/memory.c pmd_free_tlb(tlb, pmd, start); start 244 mm/memory.c unsigned long start; start 246 mm/memory.c start = addr; start 255 mm/memory.c start &= P4D_MASK; start 256 mm/memory.c if (start < floor) start 266 mm/memory.c pud = pud_offset(p4d, start); start 268 mm/memory.c pud_free_tlb(tlb, pud, start); start 278 mm/memory.c unsigned long 
start; start 280 mm/memory.c start = addr; start 289 mm/memory.c start &= PGDIR_MASK; start 290 mm/memory.c if (start < floor) start 300 mm/memory.c p4d = p4d_offset(pgd, start); start 302 mm/memory.c p4d_free_tlb(tlb, p4d, start); start 1248 mm/memory.c unsigned long start = max(vma->vm_start, start_addr); start 1251 mm/memory.c if (start >= vma->vm_end) start 1258 mm/memory.c uprobe_munmap(vma, start, end); start 1263 mm/memory.c if (start != end) { start 1278 mm/memory.c __unmap_hugepage_range_final(tlb, vma, start, end, NULL); start 1282 mm/memory.c unmap_page_range(tlb, vma, start, end, details); start 1326 mm/memory.c void zap_page_range(struct vm_area_struct *vma, unsigned long start, start 1334 mm/memory.c start, start + size); start 1335 mm/memory.c tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end); start 1339 mm/memory.c unmap_single_vma(&tlb, vma, start, range.end, NULL); start 1341 mm/memory.c tlb_finish_mmu(&tlb, start, range.end); start 1970 mm/memory.c int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) start 1975 mm/memory.c if (start + len < start) start 1982 mm/memory.c len += start & ~PAGE_MASK; start 1983 mm/memory.c pfn = start >> PAGE_SHIFT; start 2691 mm/memory.c void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, start 2697 mm/memory.c details.first_index = start; start 2698 mm/memory.c details.last_index = start + nr - 1; start 104 mm/memory_hotplug.c static struct resource *register_memory_resource(u64 start, u64 size) start 110 mm/memory_hotplug.c if (start + size > max_mem_size) start 118 mm/memory_hotplug.c res = __request_region(&iomem_resource, start, size, start 123 mm/memory_hotplug.c start, start + size); start 892 mm/memory_hotplug.c static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) start 895 mm/memory_hotplug.c unsigned long start_pfn = PFN_DOWN(start); start 971 mm/memory_hotplug.c static int __try_online_node(int nid, u64 start, bool set_node_online) start 979 mm/memory_hotplug.c pgdat = hotadd_new_pgdat(nid, start); start 1008 mm/memory_hotplug.c static int check_hotplug_memory_range(u64 start, u64 size) start 1011 mm/memory_hotplug.c if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) || start 1014 mm/memory_hotplug.c memory_block_size_bytes(), start, size); start 1035 mm/memory_hotplug.c u64 start, size; start 1039 mm/memory_hotplug.c start = res->start; start 1042 mm/memory_hotplug.c ret = check_hotplug_memory_range(start, size); start 1054 mm/memory_hotplug.c memblock_add_node(start, size, nid); start 1056 mm/memory_hotplug.c ret = __try_online_node(nid, start, false); start 1062 mm/memory_hotplug.c ret = arch_add_memory(nid, start, size, &restrictions); start 1067 mm/memory_hotplug.c ret = create_memory_block_devices(start, size); start 1069 mm/memory_hotplug.c arch_remove_memory(nid, start, size, NULL); start 1085 mm/memory_hotplug.c ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); start 1089 mm/memory_hotplug.c firmware_map_add_hotplug(start, start + size, "System RAM"); start 1096 mm/memory_hotplug.c walk_memory_blocks(start, size, NULL, online_memory_block); start 1103 mm/memory_hotplug.c memblock_remove(start, size); start 1109 mm/memory_hotplug.c int __ref __add_memory(int nid, u64 start, u64 size) start 1114 mm/memory_hotplug.c res = register_memory_resource(start, size); start 1124 mm/memory_hotplug.c int add_memory(int nid, u64 start, u64 size) start 1129 mm/memory_hotplug.c rc = __add_memory(nid, start, size); start 1219 mm/memory_hotplug.c 
unsigned long start, end; start 1245 mm/memory_hotplug.c start = pfn + i; start 1252 mm/memory_hotplug.c *valid_start = start; start 1266 mm/memory_hotplug.c static unsigned long scan_movable_pages(unsigned long start, unsigned long end) start 1270 mm/memory_hotplug.c for (pfn = start; pfn < end; pfn++) { start 1390 mm/memory_hotplug.c offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, start 1395 mm/memory_hotplug.c *offlined_pages += __offline_isolated_pages(start, start + nr_pages); start 1721 mm/memory_hotplug.c static void __release_memory_resource(resource_size_t start, start 1732 mm/memory_hotplug.c ret = release_mem_region_adjustable(&iomem_resource, start, size); start 1734 mm/memory_hotplug.c resource_size_t endres = start + size - 1; start 1737 mm/memory_hotplug.c &start, &endres, ret); start 1741 mm/memory_hotplug.c static int __ref try_remove_memory(int nid, u64 start, u64 size) start 1745 mm/memory_hotplug.c BUG_ON(check_hotplug_memory_range(start, size)); start 1752 mm/memory_hotplug.c rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb); start 1757 mm/memory_hotplug.c firmware_map_remove(start, start + size, "System RAM"); start 1758 mm/memory_hotplug.c memblock_free(start, size); start 1759 mm/memory_hotplug.c memblock_remove(start, size); start 1765 mm/memory_hotplug.c remove_memory_block_devices(start, size); start 1769 mm/memory_hotplug.c arch_remove_memory(nid, start, size, NULL); start 1770 mm/memory_hotplug.c __release_memory_resource(start, size); start 1789 mm/memory_hotplug.c void __remove_memory(int nid, u64 start, u64 size) start 1796 mm/memory_hotplug.c if (try_remove_memory(nid, start, size)) start 1804 mm/memory_hotplug.c int remove_memory(int nid, u64 start, u64 size) start 1809 mm/memory_hotplug.c rc = try_remove_memory(nid, start, size); start 613 mm/mempolicy.c static int queue_pages_test_walk(unsigned long start, unsigned long end, start 631 mm/mempolicy.c if (vma->vm_start > start) start 632 mm/mempolicy.c start = vma->vm_start; start 648 mm/mempolicy.c change_prot_numa(vma, start, endvma); start 680 mm/mempolicy.c queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, start 691 mm/mempolicy.c return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); start 731 mm/mempolicy.c static int mbind_range(struct mm_struct *mm, unsigned long start, start 742 mm/mempolicy.c vma = find_vma(mm, start); start 743 mm/mempolicy.c if (!vma || vma->vm_start > start) start 747 mm/mempolicy.c if (start > vma->vm_start) start 752 mm/mempolicy.c vmstart = max(start, vma->vm_start); start 1165 mm/mempolicy.c static struct page *new_page(struct page *page, unsigned long start) start 1170 mm/mempolicy.c vma = find_vma(current->mm, start); start 1211 mm/mempolicy.c static struct page *new_page(struct page *page, unsigned long start) start 1217 mm/mempolicy.c static long do_mbind(unsigned long start, unsigned long len, start 1233 mm/mempolicy.c if (start & ~PAGE_MASK) start 1240 mm/mempolicy.c end = start + len; start 1242 mm/mempolicy.c if (end < start) start 1244 mm/mempolicy.c if (end == start) start 1262 mm/mempolicy.c start, start + len, mode, mode_flags, start 1287 mm/mempolicy.c ret = queue_pages_range(mm, start, end, nmask, start 1295 mm/mempolicy.c err = mbind_range(mm, start, end, new); start 1303 mm/mempolicy.c start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); start 1404 mm/mempolicy.c static long kernel_mbind(unsigned long start, unsigned long len, start 1412 mm/mempolicy.c start = untagged_addr(start); 
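
The mm/ syscall entry points indexed above (madvise, mincore, mbind, mlock) all begin by sanity-checking the user-supplied start/len pair before touching any VMAs: madvise and mbind reject an unaligned start, the length is rounded up to whole pages, and start + len is checked for wrap-around. Below is a minimal standalone sketch of that recurring pattern, not kernel code: the helper name sanitize_user_range and the local PAGE_SIZE/PAGE_MASK/PAGE_ALIGN definitions are illustrative assumptions made so the example compiles on its own.

#include <stddef.h>

/* Illustrative stand-ins for the kernel's page-size macros (4 KiB pages assumed). */
#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/*
 * Hypothetical helper mirroring the checks seen in the listed entry points:
 * reject an unaligned start, round the length up to whole pages, and make
 * sure start + len does not wrap. Returns 0 on success, -1 (standing in for
 * -EINVAL/-ENOMEM) on error.
 */
static int sanitize_user_range(unsigned long start, unsigned long len_in,
                               unsigned long *out_start, unsigned long *out_len)
{
    unsigned long len, end;

    if (start & ~PAGE_MASK)       /* unaligned start: reject, as madvise()/mbind() do */
        return -1;

    len = PAGE_ALIGN(len_in);     /* round length up to a page multiple */
    end = start + len;
    if (end < start)              /* wrap-around check, as in do_mlock()/madvise() */
        return -1;

    *out_start = start;
    *out_len = len;
    return 0;
}

mlock differs slightly: as the mm/mlock.c entries below show, it accepts an unaligned start and aligns it down with start &= PAGE_MASK after folding the page offset into the length.
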
start 1423 mm/mempolicy.c return do_mbind(start, len, mode, mode_flags, &nodes, flags); start 1426 mm/mempolicy.c SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, start 1430 mm/mempolicy.c return kernel_mbind(start, len, mode, nmask, maxnode, flags); start 1645 mm/mempolicy.c COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, start 1664 mm/mempolicy.c return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); start 2288 mm/mempolicy.c sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) start 2295 mm/mempolicy.c if (start >= p->end) start 2297 mm/mempolicy.c else if (end <= p->start) start 2310 mm/mempolicy.c if (w->end <= start) start 2330 mm/mempolicy.c if (new->start < nd->start) start 2339 mm/mempolicy.c pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, start 2470 mm/mempolicy.c pr_debug("deleting %lx-l%lx\n", n->start, n->end); start 2475 mm/mempolicy.c static void sp_node_init(struct sp_node *node, unsigned long start, start 2478 mm/mempolicy.c node->start = start; start 2483 mm/mempolicy.c static struct sp_node *sp_alloc(unsigned long start, unsigned long end, start 2499 mm/mempolicy.c sp_node_init(n, start, end, newpol); start 2505 mm/mempolicy.c static int shared_policy_replace(struct shared_policy *sp, unsigned long start, start 2515 mm/mempolicy.c n = sp_lookup(sp, start, end); start 2517 mm/mempolicy.c while (n && n->start < end) { start 2519 mm/mempolicy.c if (n->start >= start) { start 2523 mm/mempolicy.c n->start = end; start 2533 mm/mempolicy.c n->end = start; start 2539 mm/mempolicy.c n->end = start; start 29 mm/mempool.c const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0); start 35 mm/mempool.c pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : ""); start 36 mm/mempool.c for (i = start; i < end; i++) start 51 mm/memremap.c xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), start 58 mm/memremap.c return PHYS_PFN(pgmap->res.start) + start 66 mm/memremap.c return (res->start + resource_size(res)) >> PAGE_SHIFT; start 123 mm/memremap.c __remove_pages(PHYS_PFN(res->start), start 126 mm/memremap.c arch_remove_memory(nid, res->start, resource_size(res), start 128 mm/memremap.c kasan_remove_zero_shadow(__va(res->start), resource_size(res)); start 132 mm/memremap.c untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res)); start 221 mm/memremap.c conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL); start 237 mm/memremap.c is_ram = region_intersects(res->start, resource_size(res), start 247 mm/memremap.c error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start), start 255 mm/memremap.c error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0, start 274 mm/memremap.c error = add_pages(nid, PHYS_PFN(res->start), start 277 mm/memremap.c error = kasan_add_zero_shadow(__va(res->start), resource_size(res)); start 283 mm/memremap.c error = arch_add_memory(nid, res->start, resource_size(res), start 291 mm/memremap.c move_pfn_range_to_zone(zone, PHYS_PFN(res->start), start 304 mm/memremap.c PHYS_PFN(res->start), start 307 mm/memremap.c return __va(res->start); start 310 mm/memremap.c kasan_remove_zero_shadow(__va(res->start), resource_size(res)); start 312 mm/memremap.c untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res)); start 396 mm/memremap.c if (phys >= pgmap->res.start && phys <= pgmap->res.end) start 37 mm/memtest.c u64 *p, *start, *end; start 43 mm/memtest.c start = __va(start_phys_aligned); start 44 mm/memtest.c end = start + (size - 
(start_phys_aligned - start_phys)) / incr; start 48 mm/memtest.c for (p = start; p < end; p++) start 51 mm/memtest.c for (p = start; p < end; p++, start_phys_aligned += incr) { start 66 mm/memtest.c static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) start 73 mm/memtest.c this_start = clamp(this_start, start, end); start 74 mm/memtest.c this_end = clamp(this_end, start, end); start 100 mm/memtest.c void __init early_memtest(phys_addr_t start, phys_addr_t end) start 111 mm/memtest.c do_one_pass(patterns[idx], start, end); start 1490 mm/migrate.c static int store_status(int __user *status, int start, int value, int nr) start 1493 mm/migrate.c if (put_user(value, status + start)) start 1495 mm/migrate.c start++; start 1602 mm/migrate.c int start, i; start 1607 mm/migrate.c for (i = start = 0; i < nr_pages; i++) { start 1631 mm/migrate.c start = i; start 1647 mm/migrate.c err = store_status(status, start, current_node, i - start); start 1650 mm/migrate.c start = i; start 1682 mm/migrate.c if (i > start) { start 1683 mm/migrate.c err = store_status(status, start, current_node, i - start); start 1703 mm/migrate.c err1 = store_status(status, start, current_node, i - start); start 2035 mm/migrate.c unsigned long start = address & HPAGE_PMD_MASK; start 2059 mm/migrate.c flush_cache_range(vma, start, start + HPAGE_PMD_SIZE); start 2097 mm/migrate.c page_add_anon_rmap(new_page, vma, start, true); start 2109 mm/migrate.c set_pmd_at(mm, start, pmd, entry); start 2141 mm/migrate.c set_pmd_at(mm, start, pmd, entry); start 2156 mm/migrate.c static int migrate_vma_collect_hole(unsigned long start, start 2163 mm/migrate.c for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { start 2173 mm/migrate.c static int migrate_vma_collect_skip(unsigned long start, start 2180 mm/migrate.c for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { start 2189 mm/migrate.c unsigned long start, start 2196 mm/migrate.c unsigned long addr = start, unmapped = 0; start 2202 mm/migrate.c return migrate_vma_collect_hole(start, end, walk); start 2218 mm/migrate.c return migrate_vma_collect_skip(start, end, start 2226 mm/migrate.c return migrate_vma_collect_skip(start, end, start 2232 mm/migrate.c return migrate_vma_collect_skip(start, end, start 2235 mm/migrate.c return migrate_vma_collect_hole(start, end, start 2241 mm/migrate.c return migrate_vma_collect_skip(start, end, walk); start 2347 mm/migrate.c flush_tlb_range(walk->vma, start, end); start 2370 mm/migrate.c migrate->vma->vm_mm, migrate->start, migrate->end); start 2373 mm/migrate.c walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, start 2377 mm/migrate.c migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); start 2445 mm/migrate.c const unsigned long start = migrate->start; start 2526 mm/migrate.c for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) { start 2556 mm/migrate.c const unsigned long start = migrate->start; start 2580 mm/migrate.c for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) { start 2665 mm/migrate.c long nr_pages = (args->end - args->start) >> PAGE_SHIFT; start 2667 mm/migrate.c args->start &= PAGE_MASK; start 2674 mm/migrate.c if (args->start < args->vma->vm_start || start 2675 mm/migrate.c args->start >= args->vma->vm_end) start 2845 mm/migrate.c const unsigned long start = migrate->start; start 2850 mm/migrate.c for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { start 252 mm/mincore.c SYSCALL_DEFINE3(mincore, unsigned long, 
start, size_t, len, start 259 mm/mincore.c start = untagged_addr(start); start 262 mm/mincore.c if (start & ~PAGE_MASK) start 266 mm/mincore.c if (!access_ok((void __user *) start, len)) start 287 mm/mincore.c retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); start 298 mm/mincore.c start += retval << PAGE_SHIFT; start 376 mm/mlock.c unsigned long start, unsigned long end) start 386 mm/mlock.c pte = get_locked_pte(vma->vm_mm, start, &ptl); start 388 mm/mlock.c end = pgd_addr_end(start, end); start 389 mm/mlock.c end = p4d_addr_end(start, end); start 390 mm/mlock.c end = pud_addr_end(start, end); start 391 mm/mlock.c end = pmd_addr_end(start, end); start 394 mm/mlock.c start += PAGE_SIZE; start 395 mm/mlock.c while (start < end) { start 399 mm/mlock.c page = vm_normal_page(vma, start, *pte); start 419 mm/mlock.c start += PAGE_SIZE; start 424 mm/mlock.c return start; start 446 mm/mlock.c unsigned long start, unsigned long end) start 450 mm/mlock.c while (start < end) { start 465 mm/mlock.c page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); start 497 mm/mlock.c start = __munlock_pagevec_fill(&pvec, vma, start 498 mm/mlock.c zone, start, end); start 504 mm/mlock.c start += page_increm * PAGE_SIZE; start 520 mm/mlock.c unsigned long start, unsigned long end, vm_flags_t newflags) start 535 mm/mlock.c pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); start 536 mm/mlock.c *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, start 544 mm/mlock.c if (start != vma->vm_start) { start 545 mm/mlock.c ret = split_vma(mm, vma, start, 1); start 560 mm/mlock.c nr_pages = (end - start) >> PAGE_SHIFT; start 576 mm/mlock.c munlock_vma_pages_range(vma, start, end); start 583 mm/mlock.c static int apply_vma_lock_flags(unsigned long start, size_t len, start 590 mm/mlock.c VM_BUG_ON(offset_in_page(start)); start 592 mm/mlock.c end = start + len; start 593 mm/mlock.c if (end < start) start 595 mm/mlock.c if (end == start) start 597 mm/mlock.c vma = find_vma(current->mm, start); start 598 mm/mlock.c if (!vma || vma->vm_start > start) start 602 mm/mlock.c if (start > vma->vm_start) start 605 mm/mlock.c for (nstart = start ; ; ) { start 640 mm/mlock.c unsigned long start, size_t len) start 648 mm/mlock.c vma = find_vma(mm, start); start 653 mm/mlock.c if (start >= vma->vm_end) start 655 mm/mlock.c if (start + len <= vma->vm_start) start 658 mm/mlock.c if (start > vma->vm_start) start 659 mm/mlock.c count -= (start - vma->vm_start); start 660 mm/mlock.c if (start + len < vma->vm_end) { start 661 mm/mlock.c count += start + len - vma->vm_start; start 671 mm/mlock.c static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) start 677 mm/mlock.c start = untagged_addr(start); start 682 mm/mlock.c len = PAGE_ALIGN(len + (offset_in_page(start))); start 683 mm/mlock.c start &= PAGE_MASK; start 701 mm/mlock.c start, len); start 706 mm/mlock.c error = apply_vma_lock_flags(start, len, flags); start 712 mm/mlock.c error = __mm_populate(start, len, 0); start 718 mm/mlock.c SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) start 720 mm/mlock.c return do_mlock(start, len, VM_LOCKED); start 723 mm/mlock.c SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) start 733 mm/mlock.c return do_mlock(start, len, vm_flags); start 736 mm/mlock.c SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) start 740 mm/mlock.c start = untagged_addr(start); start 742 mm/mlock.c len = PAGE_ALIGN(len + (offset_in_page(start))); start 743 mm/mlock.c start &= 
PAGE_MASK; start 747 mm/mlock.c ret = apply_vma_lock_flags(start, len, 0); start 78 mm/mmap.c unsigned long start, unsigned long end); start 717 mm/mmap.c int __vma_adjust(struct vm_area_struct *vma, unsigned long start, start 816 mm/mmap.c vma_adjust_trans_huge(orig_vma, start, end, adjust_next); start 857 mm/mmap.c if (start != vma->vm_start) { start 858 mm/mmap.c vma->vm_start = start; start 2552 mm/mmap.c unsigned long start; start 2565 mm/mmap.c start = vma->vm_start; start 2569 mm/mmap.c populate_vma_page_range(vma, addr, start, NULL); start 2607 mm/mmap.c unsigned long start, unsigned long end) start 2613 mm/mmap.c tlb_gather_mmu(&tlb, mm, start, end); start 2615 mm/mmap.c unmap_vmas(&tlb, vma, start, end); start 2618 mm/mmap.c tlb_finish_mmu(&tlb, start, end); start 2734 mm/mmap.c int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, start 2740 mm/mmap.c if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) start 2744 mm/mmap.c end = start + len; start 2753 mm/mmap.c arch_unmap(mm, start, end); start 2756 mm/mmap.c vma = find_vma(mm, start); start 2773 mm/mmap.c if (start > vma->vm_start) { start 2784 mm/mmap.c error = __split_vma(mm, vma, start, 0); start 2809 mm/mmap.c int error = userfaultfd_unmap_prep(vma, start, end, uf); start 2835 mm/mmap.c unmap_region(mm, vma, prev, start, end); start 2843 mm/mmap.c int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, start 2846 mm/mmap.c return __do_munmap(mm, start, len, uf, false); start 2849 mm/mmap.c static int __vm_munmap(unsigned long start, size_t len, bool downgrade) start 2858 mm/mmap.c ret = __do_munmap(mm, start, len, &uf, downgrade); start 2874 mm/mmap.c int vm_munmap(unsigned long start, size_t len) start 2876 mm/mmap.c return __vm_munmap(start, len, false); start 2891 mm/mmap.c SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, start 2906 mm/mmap.c start = start & PAGE_MASK; start 2909 mm/mmap.c if (start + size <= start) start 2919 mm/mmap.c vma = find_vma(mm, start); start 2924 mm/mmap.c if (start < vma->vm_start) start 2927 mm/mmap.c if (start + size > vma->vm_end) { start 2941 mm/mmap.c if (start + size <= next->vm_end) start 2960 mm/mmap.c for (tmp = vma; tmp->vm_start >= start + size; start 2966 mm/mmap.c vma_adjust_trans_huge(tmp, start, start + size, 0); start 2969 mm/mmap.c max(tmp->vm_start, start), start 2970 mm/mmap.c min(tmp->vm_end, start + size)); start 2975 mm/mmap.c ret = do_mmap_pgoff(vma->vm_file, start, size, start 207 mm/mmu_gather.c unsigned long start, unsigned long end) start 212 mm/mmu_gather.c tlb->fullmm = !(start | (end+1)); start 244 mm/mmu_gather.c unsigned long start, unsigned long end) start 96 mm/mmu_notifier.c unsigned long start, start 105 mm/mmu_notifier.c young |= mn->ops->clear_flush_young(mn, mm, start, end); start 113 mm/mmu_notifier.c unsigned long start, start 122 mm/mmu_notifier.c young |= mn->ops->clear_young(mn, mm, start, end); start 217 mm/mmu_notifier.c range->start, start 232 mm/mmu_notifier.c unsigned long start, unsigned long end) start 240 mm/mmu_notifier.c mn->ops->invalidate_range(mn, mm, start, end); start 199 mm/mprotect.c range.start = 0; start 220 mm/mprotect.c if (!range.start) { start 253 mm/mprotect.c if (range.start) start 308 mm/mprotect.c unsigned long start = addr; start 325 mm/mprotect.c flush_tlb_range(vma, start, end); start 331 mm/mprotect.c unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, start 338 mm/mprotect.c pages = 
hugetlb_change_protection(vma, start, end, newprot); start 340 mm/mprotect.c pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); start 374 mm/mprotect.c unsigned long start, unsigned long end, unsigned long newflags) start 378 mm/mprotect.c long nrpages = (end - start) >> PAGE_SHIFT; start 399 mm/mprotect.c error = walk_page_range(current->mm, start, end, start 428 mm/mprotect.c pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); start 429 mm/mprotect.c *pprev = vma_merge(mm, *pprev, start, end, newflags, start 440 mm/mprotect.c if (start != vma->vm_start) { start 441 mm/mprotect.c error = split_vma(mm, vma, start, 1); start 461 mm/mprotect.c change_protection(vma, start, end, vma->vm_page_prot, start 470 mm/mprotect.c populate_vma_page_range(vma, start, end, NULL); start 486 mm/mprotect.c static int do_mprotect_pkey(unsigned long start, size_t len, start 496 mm/mprotect.c start = untagged_addr(start); start 502 mm/mprotect.c if (start & ~PAGE_MASK) start 507 mm/mprotect.c end = start + len; start 508 mm/mprotect.c if (end <= start) start 510 mm/mprotect.c if (!arch_validate_prot(prot, start)) start 526 mm/mprotect.c vma = find_vma(current->mm, start); start 534 mm/mprotect.c start = vma->vm_start; start 539 mm/mprotect.c if (vma->vm_start > start) start 548 mm/mprotect.c if (start > vma->vm_start) start 551 mm/mprotect.c for (nstart = start ; ; ) { start 609 mm/mprotect.c SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, start 612 mm/mprotect.c return do_mprotect_pkey(start, len, prot, -1); start 617 mm/mprotect.c SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len, start 620 mm/mprotect.c return do_mprotect_pkey(start, len, prot, pkey); start 32 mm/msync.c SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) start 40 mm/msync.c start = untagged_addr(start); start 44 mm/msync.c if (offset_in_page(start)) start 50 mm/msync.c end = start + len; start 51 mm/msync.c if (end < start) start 54 mm/msync.c if (end == start) start 61 mm/msync.c vma = find_vma(mm, start); start 71 mm/msync.c if (start < vma->vm_start) { start 72 mm/msync.c start = vma->vm_start; start 73 mm/msync.c if (start >= end) start 84 mm/msync.c fstart = (start - vma->vm_start) + start 86 mm/msync.c fend = fstart + (min(end, vma->vm_end) - start) - 1; start 87 mm/msync.c start = vma->vm_end; start 94 mm/msync.c if (error || start >= end) start 97 mm/msync.c vma = find_vma(mm, start); start 99 mm/msync.c if (start >= end) { start 1164 mm/nommu.c unsigned long pglen, rpglen, pgend, rpgend, start; start 1202 mm/nommu.c start = pregion->vm_start; start 1203 mm/nommu.c start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; start 1204 mm/nommu.c vma->vm_start = start; start 1205 mm/nommu.c vma->vm_end = start + len; start 1223 mm/nommu.c result = start; start 1485 mm/nommu.c int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) start 1495 mm/nommu.c end = start + len; start 1498 mm/nommu.c vma = find_vma(mm, start); start 1504 mm/nommu.c start, start + len - 1); start 1513 mm/nommu.c if (start > vma->vm_start) start 1522 mm/nommu.c if (start == vma->vm_start && end == vma->vm_end) start 1524 mm/nommu.c if (start < vma->vm_start || end > vma->vm_end) start 1526 mm/nommu.c if (offset_in_page(start)) start 1530 mm/nommu.c if (start != vma->vm_start && end != vma->vm_end) { start 1531 mm/nommu.c ret = split_vma(mm, vma, start, 1); start 1535 mm/nommu.c return shrink_vma(mm, vma, start, end); start 1662 
mm/nommu.c int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) start 1664 mm/nommu.c unsigned long pfn = start >> PAGE_SHIFT; start 85 mm/oom_kill.c static bool oom_cpuset_eligible(struct task_struct *start, start 96 mm/oom_kill.c for_each_thread(start, tsk) { start 546 mm/oom_kill.c tlb_gather_mmu(&tlb, mm, range.start, range.end); start 548 mm/oom_kill.c tlb_finish_mmu(&tlb, range.start, range.end); start 552 mm/oom_kill.c unmap_page_range(&tlb, vma, range.start, range.end, NULL); start 554 mm/oom_kill.c tlb_finish_mmu(&tlb, range.start, range.end); start 2106 mm/page-writeback.c pgoff_t start, pgoff_t end) start 2108 mm/page-writeback.c XA_STATE(xas, &mapping->i_pages, start); start 1390 mm/page_alloc.c void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) start 1392 mm/page_alloc.c unsigned long start_pfn = PFN_DOWN(start); start 1774 mm/page_alloc.c unsigned long start = jiffies; start 1822 mm/page_alloc.c pgdat->node_id, nr_pages, jiffies_to_msecs(jiffies - start)); start 5955 mm/page_alloc.c unsigned long start = jiffies; start 6015 mm/page_alloc.c size, jiffies_to_msecs(jiffies - start)); start 6823 mm/page_alloc.c unsigned long __maybe_unused start = 0; start 6830 mm/page_alloc.c start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); start 6831 mm/page_alloc.c offset = pgdat->node_start_pfn - start; start 6844 mm/page_alloc.c size = (end - start) * sizeof(struct page); start 6950 mm/page_alloc.c phys_addr_t start, end; start 6959 mm/page_alloc.c NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) { start 6960 mm/page_alloc.c if (next < start) start 6961 mm/page_alloc.c pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start)); start 7021 mm/page_alloc.c unsigned long start, end, mask; start 7025 mm/page_alloc.c for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { start 7026 mm/page_alloc.c if (!start || last_nid < 0 || last_nid == nid) { start 7037 mm/page_alloc.c mask = ~((1 << __ffs(start)) - 1); start 7038 mm/page_alloc.c while (mask && last_end <= (start & (mask << 1))) start 7493 mm/page_alloc.c unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) start 7498 mm/page_alloc.c start = (void *)PAGE_ALIGN((unsigned long)start); start 7500 mm/page_alloc.c for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { start 7556 mm/page_alloc.c #define adj_init_size(start, end, size, pos, adj) \ start 7558 mm/page_alloc.c if (start <= pos && pos < end && size > adj) \ start 8319 mm/page_alloc.c unsigned long start, unsigned long end) start 8323 mm/page_alloc.c unsigned long pfn = start; start 8383 mm/page_alloc.c int alloc_contig_range(unsigned long start, unsigned long end, start 8393 mm/page_alloc.c .zone = page_zone(pfn_to_page(start)), start 8425 mm/page_alloc.c ret = start_isolate_page_range(pfn_max_align_down(start), start 8440 mm/page_alloc.c ret = __alloc_contig_migrate_range(&cc, start, end); start 8465 mm/page_alloc.c outer_start = start; start 8468 mm/page_alloc.c outer_start = start; start 8474 mm/page_alloc.c if (outer_start != start) { start 8483 mm/page_alloc.c if (outer_start + (1UL << order) <= start) start 8484 mm/page_alloc.c outer_start = start; start 8503 mm/page_alloc.c if (start != outer_start) start 8504 mm/page_alloc.c free_contig_range(outer_start, start - outer_start); start 8509 mm/page_alloc.c undo_isolate_page_range(pfn_max_align_down(start), start 290 mm/page_ext.c unsigned long start, end, pfn; start 293 mm/page_ext.c start = SECTION_ALIGN_DOWN(start_pfn); start 306 
mm/page_ext.c for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { start 315 mm/page_ext.c for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) start 324 mm/page_ext.c unsigned long start, end, pfn; start 326 mm/page_ext.c start = SECTION_ALIGN_DOWN(start_pfn); start 329 mm/page_ext.c for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) start 69 mm/page_poison.c unsigned char *start; start 75 mm/page_poison.c start = memchr_inv(mem, PAGE_POISON, bytes); start 76 mm/page_poison.c if (!start) start 79 mm/page_poison.c for (end = mem + bytes - 1; end > start; end--) { start 86 mm/page_poison.c else if (start == end && single_bit_flip(*start, PAGE_POISON)) start 91 mm/page_poison.c print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start, start 92 mm/page_poison.c end - start + 1, 1); start 264 mm/page_vma_mapped.c unsigned long start, end; start 266 mm/page_vma_mapped.c start = __vma_address(page, vma); start 267 mm/page_vma_mapped.c end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); start 269 mm/page_vma_mapped.c if (unlikely(end < vma->vm_start || start >= vma->vm_end)) start 271 mm/page_vma_mapped.c pvmw.address = max(start, vma->vm_start); start 225 mm/pagewalk.c static int walk_page_test(unsigned long start, unsigned long end, start 232 mm/pagewalk.c return ops->test_walk(start, end, walk); start 245 mm/pagewalk.c err = ops->pte_hole(start, end, walk); start 251 mm/pagewalk.c static int __walk_page_range(unsigned long start, unsigned long end, start 259 mm/pagewalk.c err = walk_hugetlb_range(start, end, walk); start 261 mm/pagewalk.c err = walk_pgd_range(start, end, walk); start 301 mm/pagewalk.c int walk_page_range(struct mm_struct *mm, unsigned long start, start 314 mm/pagewalk.c if (start >= end) start 322 mm/pagewalk.c vma = find_vma(walk.mm, start); start 327 mm/pagewalk.c } else if (start < vma->vm_start) { /* outside vma */ start 335 mm/pagewalk.c err = walk_page_test(start, next, &walk); start 349 mm/pagewalk.c err = __walk_page_range(start, next, &walk); start 352 mm/pagewalk.c } while (start = next, start < end); start 56 mm/percpu-stats.c int i, last_alloc, as_len, start, end; start 76 mm/percpu-stats.c start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; start 87 mm/percpu-stats.c while (start < last_alloc) { start 88 mm/percpu-stats.c if (test_bit(start, chunk->alloc_map)) { start 90 mm/percpu-stats.c start + 1); start 94 mm/percpu-stats.c start + 1); start 98 mm/percpu-stats.c alloc_sizes[as_len++] *= (end - start) * PCPU_MIN_ALLOC_SIZE; start 100 mm/percpu-stats.c start = end; start 290 mm/percpu.c #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ start 291 mm/percpu.c for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \ start 295 mm/percpu.c #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ start 296 mm/percpu.c for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \ start 448 mm/percpu.c int start = pcpu_next_hint(block, alloc_bits); start 451 mm/percpu.c start; start 452 mm/percpu.c *bit_off = pcpu_block_off_to_off(i, start); start 603 mm/percpu.c static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) start 605 mm/percpu.c int contig = end - start; start 607 mm/percpu.c block->first_free = min(block->first_free, start); start 608 mm/percpu.c if (start == 0) start 616 mm/percpu.c if (start > block->contig_hint_start) { start 621 mm/percpu.c } else if (start < block->scan_hint_start) { start 632 mm/percpu.c block->contig_hint_start = start; start 636 mm/percpu.c 
(!start || start 637 mm/percpu.c __ffs(start) > __ffs(block->contig_hint_start))) { start 639 mm/percpu.c block->contig_hint_start = start; start 640 mm/percpu.c if (start < block->scan_hint_start && start 643 mm/percpu.c } else if (start > block->scan_hint_start || start 650 mm/percpu.c block->scan_hint_start = start; start 659 mm/percpu.c if ((start < block->contig_hint_start && start 662 mm/percpu.c start > block->scan_hint_start)))) { start 663 mm/percpu.c block->scan_hint_start = start; start 752 mm/percpu.c int rs, re, start; /* region start, region end */ start 756 mm/percpu.c start = block->scan_hint_start + block->scan_hint; start 761 mm/percpu.c start = block->first_free; start 768 mm/percpu.c pcpu_for_each_unpop_region(alloc_map, rs, re, start, start 938 mm/percpu.c int start, end; /* start and end of the whole free area */ start 964 mm/percpu.c start = s_off; start 966 mm/percpu.c start = s_block->contig_hint_start; start 975 mm/percpu.c start); start 976 mm/percpu.c start = (start == l_bit) ? 0 : l_bit + 1; start 988 mm/percpu.c if (!start && e_off == PCPU_BITMAP_BLOCK_BITS) start 990 mm/percpu.c pcpu_block_update(s_block, start, e_off); start 1020 mm/percpu.c if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index) start 1024 mm/percpu.c pcpu_block_off_to_off(s_index, start), start 1133 mm/percpu.c unsigned long start, start 1141 mm/percpu.c index = find_next_zero_bit(map, size, start); start 1161 mm/percpu.c start = i + 1; start 1187 mm/percpu.c size_t align, int start) start 1201 mm/percpu.c end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, start 1203 mm/percpu.c bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, start 2005 mm/percpu.c void *start = per_cpu_ptr(base, cpu); start 2008 mm/percpu.c if (va >= start && va < start + static_size) { start 2010 mm/percpu.c *can_addr = (unsigned long) (va - start); start 2085 mm/percpu.c void *start = per_cpu_ptr(base, cpu); start 2087 mm/percpu.c if (addr >= start && addr < start + pcpu_unit_size) { start 371 mm/readahead.c ra->start = offset; start 409 mm/readahead.c if ((offset == (ra->start + ra->size - ra->async_size) || start 410 mm/readahead.c offset == (ra->start + ra->size))) { start 411 mm/readahead.c ra->start += ra->size; start 424 mm/readahead.c pgoff_t start; start 427 mm/readahead.c start = page_cache_next_miss(mapping, offset + 1, max_pages); start 430 mm/readahead.c if (!start || start - offset > max_pages) start 433 mm/readahead.c ra->start = start; start 434 mm/readahead.c ra->size = start - offset; /* old async_size */ start 470 mm/readahead.c ra->start = offset; start 481 mm/readahead.c if (offset == ra->start && ra->size == ra->async_size) { start 1385 mm/rmap.c adjust_range_if_pmd_sharing_possible(vma, &range.start, start 1439 mm/rmap.c flush_cache_range(vma, range.start, range.end); start 1440 mm/rmap.c flush_tlb_range(vma, range.start, range.end); start 1441 mm/rmap.c mmu_notifier_invalidate_range(mm, range.start, start 17 mm/rodata_test.c unsigned long start, end; start 41 mm/rodata_test.c start = (unsigned long)__start_rodata; start 43 mm/rodata_test.c if (start & (PAGE_SIZE - 1)) { start 105 mm/shmem.c pgoff_t start; /* start of range currently being fallocated */ start 703 mm/shmem.c pgoff_t start, pgoff_t end) start 705 mm/shmem.c XA_STATE(xas, &mapping->i_pages, start); start 800 mm/shmem.c pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; start 814 mm/shmem.c index = start; start 877 mm/shmem.c shmem_getpage(inode, start - 1, &page, SGP_READ); start 
880 mm/shmem.c if (start > end) { start 900 mm/shmem.c if (start >= end) start 903 mm/shmem.c index = start; start 912 mm/shmem.c if (index == start || end != -1) start 915 mm/shmem.c index = start; start 949 mm/shmem.c start++; start 1122 mm/shmem.c pgoff_t start, unsigned int nr_entries, start 1126 mm/shmem.c XA_STATE(xas, &mapping->i_pages, start); start 1204 mm/shmem.c pgoff_t start = 0; start 1217 mm/shmem.c pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, start 1237 mm/shmem.c start = indices[pvec.nr - 1]; start 1342 mm/shmem.c index >= shmem_falloc->start && start 2023 mm/shmem.c vmf->pgoff >= shmem_falloc->start && start 2693 mm/shmem.c pgoff_t start, end; start 2705 mm/shmem.c start = offset >> PAGE_SHIFT; start 2707 mm/shmem.c new_offset = shmem_seek_hole_data(mapping, start, end, whence); start 2732 mm/shmem.c pgoff_t start, index, end; start 2753 mm/shmem.c shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; start 2784 mm/shmem.c start = offset >> PAGE_SHIFT; start 2787 mm/shmem.c if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { start 2793 mm/shmem.c shmem_falloc.start = start; start 2794 mm/shmem.c shmem_falloc.next = start; start 2801 mm/shmem.c for (index = start; index < end; index++) { start 2816 mm/shmem.c if (index > start) { start 2818 mm/shmem.c (loff_t)start << PAGE_SHIFT, start 1565 mm/slab_common.c .start = slab_start, start 735 mm/slub.c u8 *start, unsigned int value, unsigned int bytes) start 741 mm/slub.c fault = memchr_inv(start, value, bytes); start 746 mm/slub.c end = start + bytes; start 821 mm/slub.c u8 *start; start 831 mm/slub.c start = page_address(page); start 833 mm/slub.c end = start + length; start 1550 mm/slub.c unsigned long *pos, void *start, start 1567 mm/slub.c return (char *)start + idx; start 1573 mm/slub.c void *start; start 1585 mm/slub.c start = fixup_red_left(s, page_address(page)); start 1588 mm/slub.c cur = next_freelist_entry(s, page, &pos, start, page_limit, start 1594 mm/slub.c next = next_freelist_entry(s, page, &pos, start, page_limit, start 1621 mm/slub.c void *start, *p, *next; start 1663 mm/slub.c start = page_address(page); start 1665 mm/slub.c setup_page_debug(s, page, start); start 1670 mm/slub.c start = fixup_red_left(s, start); start 1671 mm/slub.c start = setup_object(s, page, start); start 1672 mm/slub.c page->freelist = start; start 1673 mm/slub.c for (idx = 0, p = start; idx < page->objects - 1; idx++) { start 4542 mm/slub.c long start, end, pos; start 4547 mm/slub.c start = -1; start 4551 mm/slub.c pos = start + (end - start + 1) / 2; start 4587 mm/slub.c start = pos; start 133 mm/sparse-vmemmap.c unsigned long start, unsigned long end) start 140 mm/sparse-vmemmap.c start, end - 1); start 216 mm/sparse-vmemmap.c int __meminit vmemmap_populate_basepages(unsigned long start, start 219 mm/sparse-vmemmap.c unsigned long addr = start; start 251 mm/sparse-vmemmap.c unsigned long start; start 263 mm/sparse-vmemmap.c start = (unsigned long) pfn_to_page(pfn); start 264 mm/sparse-vmemmap.c end = start + nr_pages * sizeof(struct page); start 266 mm/sparse-vmemmap.c if (vmemmap_populate(start, end, nid, altmap)) start 211 mm/sparse.c #define for_each_present_section_nr(start, section_nr) \ start 212 mm/sparse.c for (section_nr = next_present_section_nr(start-1); \ start 258 mm/sparse.c void __init memory_present(int nid, unsigned long start, unsigned long end) start 275 mm/sparse.c start &= PAGE_SECTION_MASK; start 276 mm/sparse.c mminit_validate_memmodel_limits(&start, &end); start 277 mm/sparse.c for 
(pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { start 659 mm/sparse.c unsigned long start = (unsigned long) pfn_to_page(pfn); start 660 mm/sparse.c unsigned long end = start + nr_pages * sizeof(struct page); start 662 mm/sparse.c vmemmap_free(start, end, altmap); start 666 mm/sparse.c unsigned long start = (unsigned long)memmap; start 669 mm/sparse.c vmemmap_free(start, end, NULL); start 179 mm/swap.c int get_kernel_page(unsigned long start, int write, struct page **pages) start 182 mm/swap.c .iov_base = (void *)start, start 992 mm/swap.c pgoff_t start, unsigned nr_entries, start 995 mm/swap.c pvec->nr = find_get_entries(mapping, start, nr_entries, start 1042 mm/swap.c struct address_space *mapping, pgoff_t *start, pgoff_t end) start 1044 mm/swap.c pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE, start 631 mm/swap_state.c unsigned long *start, start 634 mm/swap_state.c *start = max3(lpfn, PFN_DOWN(vma->vm_start), start 647 mm/swap_state.c unsigned long start, end; start 686 mm/swap_state.c swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end); start 689 mm/swap_state.c &start, &end); start 693 mm/swap_state.c &start, &end); start 695 mm/swap_state.c ra_info->nr_pte = end - start; start 696 mm/swap_state.c ra_info->offset = fpfn - start; start 702 mm/swap_state.c for (pfn = start; pfn != end; pfn++) start 2779 mm/swapfile.c .start = swap_start, start 294 mm/truncate.c pgoff_t start; /* inclusive */ start 316 mm/truncate.c start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; start 328 mm/truncate.c index = start; start 375 mm/truncate.c struct page *page = find_lock_page(mapping, start - 1); start 378 mm/truncate.c if (start > end) { start 410 mm/truncate.c if (start >= end) start 413 mm/truncate.c index = start; start 419 mm/truncate.c if (index == start) start 422 mm/truncate.c index = start; start 425 mm/truncate.c if (index == start && indices[0] >= end) { start 439 mm/truncate.c index = start - 1; start 547 mm/truncate.c pgoff_t start, pgoff_t end) start 551 mm/truncate.c pgoff_t index = start; start 685 mm/truncate.c pgoff_t start, pgoff_t end) start 699 mm/truncate.c index = start; start 764 mm/truncate.c unmap_mapping_pages(mapping, start, end - start + 1, false); start 610 mm/userfaultfd.c ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, start 613 mm/userfaultfd.c return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing); start 94 mm/vmacache.c unsigned long start, start 97 mm/vmacache.c int idx = VMACACHE_HASH(start); start 108 mm/vmacache.c if (vma && vma->vm_start == start && vma->vm_end == end) { start 221 mm/vmalloc.c static int vmap_page_range_noflush(unsigned long start, unsigned long end, start 226 mm/vmalloc.c unsigned long addr = start; start 242 mm/vmalloc.c static int vmap_page_range(unsigned long start, unsigned long end, start 247 mm/vmalloc.c ret = vmap_page_range_noflush(start, end, prot, pages); start 248 mm/vmalloc.c flush_cache_vmap(start, end); start 1246 mm/vmalloc.c static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) start 1270 mm/vmalloc.c if (va->va_start < start) start 1271 mm/vmalloc.c start = va->va_start; start 1276 mm/vmalloc.c flush_tlb_kernel_range(start, end); start 1670 mm/vmalloc.c static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) start 1693 mm/vmalloc.c start = min(s, start); start 1705 mm/vmalloc.c if (!__purge_vmap_area_lazy(start, end) && flush) start 1706 mm/vmalloc.c flush_tlb_kernel_range(start, end); start 1725 mm/vmalloc.c unsigned 
long start = ULONG_MAX, end = 0; start 1728 mm/vmalloc.c _vm_unmap_aliases(start, end, flush); start 2042 mm/vmalloc.c unsigned long align, unsigned long flags, unsigned long start, start 2064 mm/vmalloc.c va = alloc_vmap_area(size, align, start, end, node, gfp_mask); start 2076 mm/vmalloc.c unsigned long start, unsigned long end) start 2078 mm/vmalloc.c return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, start 2084 mm/vmalloc.c unsigned long start, unsigned long end, start 2087 mm/vmalloc.c return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, start 2184 mm/vmalloc.c unsigned long start = ULONG_MAX, end = 0; start 2212 mm/vmalloc.c start = min(addr, start); start 2224 mm/vmalloc.c _vm_unmap_aliases(start, end, flush_dmap); start 2480 mm/vmalloc.c unsigned long start, unsigned long end, gfp_t gfp_mask, start 2493 mm/vmalloc.c vm_flags, start, end, node, gfp_mask, caller); start 3234 mm/vmalloc.c unsigned long base, start, size, end, last_end; start 3241 mm/vmalloc.c start = offsets[area]; start 3242 mm/vmalloc.c end = start + sizes[area]; start 3249 mm/vmalloc.c if (start > offsets[last_area]) start 3256 mm/vmalloc.c BUG_ON(start2 < end && start < end2); start 3282 mm/vmalloc.c start = offsets[area]; start 3283 mm/vmalloc.c end = start + sizes[area]; start 3315 mm/vmalloc.c if (base + start < va->va_start) { start 3330 mm/vmalloc.c start = offsets[area]; start 3331 mm/vmalloc.c end = start + sizes[area]; start 3339 mm/vmalloc.c start = base + offsets[area]; start 3342 mm/vmalloc.c va = pvm_find_va_enclose_addr(start); start 3347 mm/vmalloc.c type = classify_va_fit_type(va, start, size); start 3352 mm/vmalloc.c ret = adjust_va_to_fit_type(va, start, size, type); start 3358 mm/vmalloc.c va->va_start = start; start 3359 mm/vmalloc.c va->va_end = start + size; start 3557 mm/vmalloc.c .start = s_start, start 1532 mm/vmstat.c .start = frag_start, start 1539 mm/vmstat.c .start = frag_start, start 1654 mm/vmstat.c .start = frag_start, /* iterate over all zones. 
The same as in start 1741 mm/vmstat.c .start = vmstat_start, start 2064 mm/vmstat.c .start = frag_start, start 2116 mm/vmstat.c .start = frag_start, start 672 net/8021q/vlan_dev.c unsigned int start; start 676 net/8021q/vlan_dev.c start = u64_stats_fetch_begin_irq(&p->syncp); start 682 net/8021q/vlan_dev.c } while (u64_stats_fetch_retry_irq(&p->syncp, start)); start 67 net/8021q/vlanproc.c .start = vlan_seq_start, start 2206 net/9p/client.c fid->fid, flock->type, flock->flags, flock->start, start 2210 net/9p/client.c flock->flags, flock->start, flock->length, start 2239 net/9p/client.c glock->start, glock->length, glock->proc_id, glock->client_id); start 2242 net/9p/client.c glock->start, glock->length, glock->proc_id, glock->client_id); start 2248 net/9p/client.c &glock->start, &glock->length, &glock->proc_id, start 2255 net/9p/client.c "proc_id %d client_id %s\n", glock->type, glock->start, start 167 net/9p/trans_virtio.c static int pack_sg_list(struct scatterlist *sg, int start, start 171 net/9p/trans_virtio.c int index = start; start 184 net/9p/trans_virtio.c if (index-start) start 186 net/9p/trans_virtio.c return index-start; start 213 net/9p/trans_virtio.c pack_sg_list_p(struct scatterlist *sg, int start, int limit, start 218 net/9p/trans_virtio.c int index = start; start 220 net/9p/trans_virtio.c BUG_ON(nr_pages > (limit - start)); start 238 net/9p/trans_virtio.c if (index-start) start 240 net/9p/trans_virtio.c return index - start; start 1035 net/appletalk/aarp.c .start = aarp_seq_start, start 190 net/appletalk/atalk_proc.c .start = atalk_seq_interface_start, start 197 net/appletalk/atalk_proc.c .start = atalk_seq_route_start, start 204 net/appletalk/atalk_proc.c .start = atalk_seq_socket_start, start 928 net/appletalk/ddp.c int start = skb_headlen(skb); start 933 net/appletalk/ddp.c if ((copy = start - offset) > 0) { start 947 net/appletalk/ddp.c WARN_ON(start > offset + len); start 949 net/appletalk/ddp.c end = start + skb_frag_size(frag); start 957 net/appletalk/ddp.c offset - start, copy, sum); start 964 net/appletalk/ddp.c start = end; start 970 net/appletalk/ddp.c WARN_ON(start > offset + len); start 972 net/appletalk/ddp.c end = start + frag_iter->len; start 976 net/appletalk/ddp.c sum = atalk_sum_skb(frag_iter, offset - start, start 982 net/appletalk/ddp.c start = end; start 815 net/atm/br2684.c .start = br2684_seq_start, start 861 net/atm/clip.c .start = clip_seq_start, start 990 net/atm/lec.c .start = lec_seq_start, start 198 net/atm/mpoa_proc.c .start = mpc_start, start 242 net/atm/proc.c .start = atm_dev_seq_start, start 265 net/atm/proc.c .start = vcc_seq_start, start 287 net/atm/proc.c .start = vcc_seq_start, start 310 net/atm/proc.c .start = vcc_seq_start, start 1922 net/ax25/af_ax25.c .start = ax25_info_start, start 245 net/ax25/ax25_out.c unsigned short start, end, next; start 256 net/ax25/ax25_out.c start = (skb_peek(&ax25->ack_queue) == NULL) ? 
ax25->va : ax25->vs; start 259 net/ax25/ax25_out.c if (start == end) start 276 net/ax25/ax25_out.c ax25->vs = start; start 324 net/ax25/ax25_route.c .start = ax25_rt_seq_start, start 182 net/ax25/ax25_uid.c .start = ax25_uid_seq_start, start 672 net/bluetooth/af_bluetooth.c .start = bt_seq_start, start 135 net/bluetooth/bnep/bnep.h __u16 start; start 92 net/bluetooth/bnep/core.c s->proto_filter[0].start = ETH_P_IP; start 95 net/bluetooth/bnep/core.c s->proto_filter[1].start = ETH_P_RARP; start 98 net/bluetooth/bnep/core.c s->proto_filter[2].start = ETH_P_IPX; start 126 net/bluetooth/bnep/core.c f[i].start = get_unaligned_be16(data++); start 130 net/bluetooth/bnep/core.c f[i].start, f[i].end); start 75 net/bluetooth/bnep/netdev.c u8 start[ETH_ALEN] = { 0x01 }; start 78 net/bluetooth/bnep/netdev.c __skb_put_data(skb, start, ETH_ALEN); start 155 net/bluetooth/bnep/netdev.c if (proto >= f[i].start && proto <= f[i].end) start 744 net/bluetooth/hidp/core.c .start = hidp_start, start 205 net/bluetooth/l2cap_core.c u16 p, start, end, incr; start 208 net/bluetooth/l2cap_core.c start = L2CAP_PSM_DYN_START; start 212 net/bluetooth/l2cap_core.c start = L2CAP_PSM_LE_DYN_START; start 218 net/bluetooth/l2cap_core.c for (p = start; p <= end; p += incr) start 109 net/bpfilter/bpfilter_kern.c bpfilter_ops.start = &start_umh; start 121 net/bpfilter/bpfilter_kern.c bpfilter_ops.start = NULL; start 204 net/bridge/br_device.c unsigned int start; start 208 net/bridge/br_device.c start = u64_stats_fetch_begin_irq(&bstats->syncp); start 210 net/bridge/br_device.c } while (u64_stats_fetch_retry_irq(&bstats->syncp, start)); start 2442 net/bridge/br_multicast.c unsigned int start; start 2445 net/bridge/br_multicast.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 2447 net/bridge/br_multicast.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 1214 net/bridge/br_vlan.c unsigned int start; start 1218 net/bridge/br_vlan.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 1223 net/bridge/br_vlan.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 28 net/bridge/netfilter/ebt_among.c int start, limit, i; start 33 net/bridge/netfilter/ebt_among.c start = wh->table[key]; start 36 net/bridge/netfilter/ebt_among.c for (i = start; i < limit; i++) { start 43 net/bridge/netfilter/ebt_among.c for (i = start; i < limit; i++) { start 151 net/ceph/debugfs.c le64_to_cpu(monc->subs[i].item.start), start 295 net/ceph/mon_client.c le64_to_cpu(monc->subs[i].item.start), start 350 net/ceph/mon_client.c __le64 start = cpu_to_le64(epoch); start 357 net/ceph/mon_client.c monc->subs[sub].item.start == start && start 361 net/ceph/mon_client.c monc->subs[sub].item.start = start; start 395 net/ceph/mon_client.c monc->subs[sub].item.start = cpu_to_le64(epoch + 1); start 348 net/ceph/osdmap.c void *start = pbyval; start 395 net/ceph/osdmap.c i, (int)(*p-start), *p, end); start 430 net/ceph/osdmap.c b->size, (int)(*p-start), *p, end); start 483 net/ceph/osdmap.c i, (int)(*p-start), *p, end); start 489 net/ceph/osdmap.c i, (int)(*p-start), *p, end); start 1475 net/ceph/osdmap.c void *start = *p; start 1604 net/ceph/osdmap.c err, epoch, (int)(*p - start), *p, start, end); start 1607 net/ceph/osdmap.c start, end - start, true); start 1761 net/ceph/osdmap.c void *start = *p; start 1897 net/ceph/osdmap.c err, epoch, (int)(*p - start), *p, start, end); start 1900 net/ceph/osdmap.c start, end - start, true); start 421 net/core/datagram.c int start = skb_headlen(skb); start 422 net/core/datagram.c 
int i, copy = start - offset, start_off = offset, n; start 443 net/core/datagram.c WARN_ON(start > offset + len); start 445 net/core/datagram.c end = start + skb_frag_size(frag); start 453 net/core/datagram.c vaddr + skb_frag_off(frag) + offset - start, start 462 net/core/datagram.c start = end; start 468 net/core/datagram.c WARN_ON(start > offset + len); start 470 net/core/datagram.c end = start + frag_iter->len; start 474 net/core/datagram.c if (__skb_datagram_iter(frag_iter, offset - start, start 481 net/core/datagram.c start = end; start 555 net/core/datagram.c int start = skb_headlen(skb); start 556 net/core/datagram.c int i, copy = start - offset; start 575 net/core/datagram.c WARN_ON(start > offset + len); start 577 net/core/datagram.c end = start + skb_frag_size(frag); start 584 net/core/datagram.c skb_frag_off(frag) + offset - start, start 593 net/core/datagram.c start = end; start 599 net/core/datagram.c WARN_ON(start > offset + len); start 601 net/core/datagram.c end = start + frag_iter->len; start 606 net/core/datagram.c offset - start, start 613 net/core/datagram.c start = end; start 630 net/core/datagram.c size_t start; start 639 net/core/datagram.c MAX_SKB_FRAGS - frag, &start); start 646 net/core/datagram.c truesize = PAGE_ALIGN(copied + start); start 657 net/core/datagram.c int size = min_t(int, copied, PAGE_SIZE - start); start 658 net/core/datagram.c skb_fill_page_desc(skb, frag++, pages[n], start, size); start 659 net/core/datagram.c start = 0; start 2880 net/core/dev.c int ret = 0, offset, start; start 2896 net/core/dev.c start = skb_checksum_start_offset(skb); start 2897 net/core/dev.c offset = start + offsetof(struct sctphdr, checksum); start 2908 net/core/dev.c crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, start 2909 net/core/dev.c skb->len - start, ~(__u32)0, start 654 net/core/devlink.c int start = cb->args[0]; start 662 net/core/devlink.c if (idx < start) { start 708 net/core/devlink.c int start = cb->args[0]; start 718 net/core/devlink.c if (idx < start) { start 896 net/core/devlink.c int start = cb->args[0]; start 906 net/core/devlink.c if (idx < start) { start 1005 net/core/devlink.c static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, start 1015 net/core/devlink.c if (*p_idx < start) { start 1036 net/core/devlink.c int start = cb->args[0]; start 1047 net/core/devlink.c err = __sb_pool_get_dumpit(msg, start, &idx, devlink, start 1198 net/core/devlink.c static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, start 1210 net/core/devlink.c if (*p_idx < start) { start 1234 net/core/devlink.c int start = cb->args[0]; start 1245 net/core/devlink.c err = __sb_port_pool_get_dumpit(msg, start, &idx, start 1405 net/core/devlink.c int start, int *p_idx, start 1417 net/core/devlink.c if (*p_idx < start) { start 1435 net/core/devlink.c if (*p_idx < start) { start 1461 net/core/devlink.c int start = cb->args[0]; start 1473 net/core/devlink.c err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx, start 3156 net/core/devlink.c int start = cb->args[0]; start 3166 net/core/devlink.c if (idx < start) { start 3419 net/core/devlink.c int start = cb->args[0]; start 3431 net/core/devlink.c if (idx < start) { start 3685 net/core/devlink.c int start = cb->args[0]; start 3696 net/core/devlink.c if (idx < start) { start 4085 net/core/devlink.c int start = cb->args[0]; start 4093 net/core/devlink.c if (idx < start) { start 4497 net/core/devlink.c int *start) start 4509 net/core/devlink.c if (i < *start) { start 4536 net/core/devlink.c 
*start = ++i; start 5030 net/core/devlink.c int start = cb->args[0]; start 5041 net/core/devlink.c if (idx < start) { start 5166 net/core/devlink.c u64 start = cb->args[0]; start 5178 net/core/devlink.c if (!start) { start 5342 net/core/devlink.c unsigned int start; start 5346 net/core/devlink.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 5349 net/core/devlink.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 5475 net/core/devlink.c int start = cb->args[0]; start 5485 net/core/devlink.c if (idx < start) { start 5681 net/core/devlink.c int start = cb->args[0]; start 5692 net/core/devlink.c if (idx < start) { start 1349 net/core/drop_monitor.c unsigned int start; start 1353 net/core/drop_monitor.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 1355 net/core/drop_monitor.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 1393 net/core/drop_monitor.c unsigned int start; start 1397 net/core/drop_monitor.c start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); start 1399 net/core/drop_monitor.c } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); start 30 net/core/fib_rules.c if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) || start 199 net/core/fib_rules.c return uid_valid(range->start) && uid_valid(range->end); start 209 net/core/fib_rules.c out.start = make_kuid(current_user_ns(), in->start); start 218 net/core/fib_rules.c from_kuid_munged(current_user_ns(), range->start), start 233 net/core/fib_rules.c port_range->start = pr->start; start 266 net/core/fib_rules.c if (uid_lt(fl->flowi_uid, rule->uid_range.start) || start 437 net/core/fib_rules.c (!uid_eq(r->uid_range.start, rule->uid_range.start) || start 617 net/core/fib_rules.c !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) { start 698 net/core/fib_rules.c if (!uid_eq(r->uid_range.start, rule->uid_range.start) || start 2219 net/core/filter.c BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, start 2222 net/core/filter.c u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; start 2228 net/core/filter.c if (unlikely(flags || end <= start)) start 2232 net/core/filter.c i = msg->sg.start; start 2236 net/core/filter.c if (start < offset + len) start 2241 net/core/filter.c if (unlikely(start >= offset + len)) start 2248 net/core/filter.c bytes_sg_total = start - offset + bytes; start 2330 net/core/filter.c msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; start 2345 net/core/filter.c BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, start 2357 net/core/filter.c i = msg->sg.start; start 2362 net/core/filter.c if (start < offset + l) start 2367 net/core/filter.c if (start >= offset + l) start 2379 net/core/filter.c if (!space || (space == 1 && start != offset)) start 2393 net/core/filter.c front = start - offset; start 2408 net/core/filter.c } else if (start - offset) { start 2412 net/core/filter.c psge->length = start - offset; start 2414 net/core/filter.c rsge.offset += start; start 2507 net/core/filter.c BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, start 2511 net/core/filter.c u64 last = start + len; start 2518 net/core/filter.c i = msg->sg.start; start 2523 net/core/filter.c if (start < offset + l) start 2529 net/core/filter.c if (start >= offset + l || last >= msg->sg.size) start 2556 net/core/filter.c if (start != offset) { start 2558 net/core/filter.c int a = start; start 125 net/core/gen_stats.c unsigned int start; start 130 net/core/gen_stats.c start = 
u64_stats_fetch_begin_irq(&bcpu->syncp); start 133 net/core/gen_stats.c } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); start 3337 net/core/neighbour.c .start = neigh_stat_seq_start, start 172 net/core/net-procfs.c .start = dev_seq_start, start 179 net/core/net-procfs.c .start = softnet_seq_start, start 269 net/core/net-procfs.c .start = ptype_seq_start, start 337 net/core/net-procfs.c .start = dev_seq_start, start 2190 net/core/skbuff.c int start = skb_headlen(skb); start 2198 net/core/skbuff.c if ((copy = start - offset) > 0) { start 2212 net/core/skbuff.c WARN_ON(start > offset + len); start 2214 net/core/skbuff.c end = start + skb_frag_size(f); start 2224 net/core/skbuff.c skb_frag_off(f) + offset - start, start 2236 net/core/skbuff.c start = end; start 2242 net/core/skbuff.c WARN_ON(start > offset + len); start 2244 net/core/skbuff.c end = start + frag_iter->len; start 2248 net/core/skbuff.c if (skb_copy_bits(frag_iter, offset - start, to, copy)) start 2255 net/core/skbuff.c start = end; start 2552 net/core/skbuff.c int start = skb_headlen(skb); start 2559 net/core/skbuff.c if ((copy = start - offset) > 0) { start 2573 net/core/skbuff.c WARN_ON(start > offset + len); start 2575 net/core/skbuff.c end = start + skb_frag_size(frag); start 2585 net/core/skbuff.c skb_frag_off(frag) + offset - start, start 2597 net/core/skbuff.c start = end; start 2603 net/core/skbuff.c WARN_ON(start > offset + len); start 2605 net/core/skbuff.c end = start + frag_iter->len; start 2609 net/core/skbuff.c if (skb_store_bits(frag_iter, offset - start, start 2617 net/core/skbuff.c start = end; start 2631 net/core/skbuff.c int start = skb_headlen(skb); start 2632 net/core/skbuff.c int i, copy = start - offset; start 2652 net/core/skbuff.c WARN_ON(start > offset + len); start 2654 net/core/skbuff.c end = start + skb_frag_size(frag); start 2665 net/core/skbuff.c skb_frag_off(frag) + offset - start, start 2682 net/core/skbuff.c start = end; start 2688 net/core/skbuff.c WARN_ON(start > offset + len); start 2690 net/core/skbuff.c end = start + frag_iter->len; start 2695 net/core/skbuff.c csum2 = __skb_checksum(frag_iter, offset - start, start 2704 net/core/skbuff.c start = end; start 2729 net/core/skbuff.c int start = skb_headlen(skb); start 2730 net/core/skbuff.c int i, copy = start - offset; start 2750 net/core/skbuff.c WARN_ON(start > offset + len); start 2752 net/core/skbuff.c end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); start 2764 net/core/skbuff.c skb_frag_off(frag) + offset - start, start 2780 net/core/skbuff.c start = end; start 2787 net/core/skbuff.c WARN_ON(start > offset + len); start 2789 net/core/skbuff.c end = start + frag_iter->len; start 2794 net/core/skbuff.c offset - start, start 2803 net/core/skbuff.c start = end; start 4165 net/core/skbuff.c int start = skb_headlen(skb); start 4166 net/core/skbuff.c int i, copy = start - offset; start 4186 net/core/skbuff.c WARN_ON(start > offset + len); start 4188 net/core/skbuff.c end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); start 4197 net/core/skbuff.c skb_frag_off(frag) + offset - start); start 4203 net/core/skbuff.c start = end; start 4209 net/core/skbuff.c WARN_ON(start > offset + len); start 4211 net/core/skbuff.c end = start + frag_iter->len; start 4218 net/core/skbuff.c ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, start 4227 net/core/skbuff.c start = end; start 4665 net/core/skbuff.c bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) start 4667 net/core/skbuff.c u32 csum_end = (u32)start + 
(u32)off + sizeof(__sum16); start 4668 net/core/skbuff.c u32 csum_start = skb_headroom(skb) + (u32)start; start 4672 net/core/skbuff.c start, off, skb_headroom(skb), skb_headlen(skb)); start 4678 net/core/skbuff.c skb_set_transport_header(skb, start); start 13 net/core/skmsg.c if (msg->sg.end > msg->sg.start && start 17 net/core/skmsg.c if (msg->sg.end < msg->sg.start && start 18 net/core/skmsg.c (elem_first_coalesce > msg->sg.start || start 79 net/core/skmsg.c int i = src->sg.start; start 129 net/core/skmsg.c int i = msg->sg.start; start 147 net/core/skmsg.c msg->sg.start = i; start 153 net/core/skmsg.c int i = msg->sg.start; start 200 net/core/skmsg.c return __sk_msg_free(sk, msg, msg->sg.start, false); start 206 net/core/skmsg.c return __sk_msg_free(sk, msg, msg->sg.start, true); start 214 net/core/skmsg.c u32 i = msg->sg.start; start 235 net/core/skmsg.c msg->sg.start = i; start 287 net/core/skmsg.c msg->sg.curr = msg->sg.start; start 289 net/core/skmsg.c } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >= start 290 net/core/skmsg.c sk_msg_iter_dist(msg->sg.start, msg->sg.end)) { start 422 net/core/skmsg.c msg->sg.start = 0; start 457 net/core/skmsg.c goto start; start 463 net/core/skmsg.c start: start 3566 net/core/sock.c .start = proto_seq_start, start 176 net/dccp/ackvec.c u16 start = __ackvec_idx_add(av->av_buf_head, 1), start 177 net/dccp/ackvec.c len = DCCPAV_MAX_ACKVEC_LEN - start; start 181 net/dccp/ackvec.c memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len); start 182 net/dccp/ackvec.c start = 0; start 186 net/dccp/ackvec.c memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num); start 2304 net/decnet/af_decnet.c .start = dn_socket_seq_start, start 1380 net/decnet/dn_dev.c .start = dn_dev_seq_start, start 587 net/decnet/dn_neigh.c .start = dn_neigh_seq_start, start 1844 net/decnet/dn_route.c .start = dn_rt_cache_seq_start, start 666 net/dsa/slave.c unsigned int start; start 674 net/dsa/slave.c start = u64_stats_fetch_begin_irq(&s->syncp); start 679 net/dsa/slave.c } while (u64_stats_fetch_retry_irq(&s->syncp, start)); start 1051 net/dsa/slave.c unsigned int start; start 1060 net/dsa/slave.c start = u64_stats_fetch_begin_irq(&s->syncp); start 1065 net/dsa/slave.c } while (u64_stats_fetch_retry_irq(&s->syncp, start)); start 552 net/ieee802154/nl802154.c long start; start 621 net/ieee802154/nl802154.c if (++idx <= state->start) start 646 net/ieee802154/nl802154.c state->start = idx; start 1669 net/ipv4/af_inet.c unsigned int start; start 1674 net/ipv4/af_inet.c start = u64_stats_fetch_begin_irq(syncp); start 1676 net/ipv4/af_inet.c } while (u64_stats_fetch_retry_irq(syncp, start)); start 1413 net/ipv4/arp.c .start = arp_seq_start, start 42 net/ipv4/bpfilter/sockopt.c err = bpfilter_ops.start(); start 2652 net/ipv4/fib_trie.c .start = fib_trie_seq_start, start 2841 net/ipv4/fib_trie.c .start = fib_route_seq_start, start 93 net/ipv4/fou.c size_t start = ntohs(pd[0]); start 96 net/ipv4/fou.c max_t(size_t, offset + sizeof(u16), start); start 106 net/ipv4/fou.c start, offset, nopartial); start 293 net/ipv4/fou.c size_t start = ntohs(pd[0]); start 303 net/ipv4/fou.c start, offset, grc, nopartial); start 2864 net/ipv4/igmp.c .start = igmp_mc_seq_start, start 2997 net/ipv4/igmp.c .start = igmp_mcf_seq_start, start 228 net/ipv4/ip_fragment.c unsigned int start, end; start 235 net/ipv4/ip_fragment.c start = qp->rid; start 239 net/ipv4/ip_fragment.c rc = qp->q.fragments_tail && (end - start) > max; start 157 net/ipv4/ip_options.c unsigned char *start = sptr+sopt->srr; start 160 
net/ipv4/ip_options.c optlen = start[1]; start 161 net/ipv4/ip_options.c soffset = start[2]; start 167 net/ipv4/ip_options.c memcpy(&faddr, &start[soffset-1], 4); start 169 net/ipv4/ip_options.c memcpy(&dptr[doffset-1], &start[soffset-1], 4); start 174 net/ipv4/ip_options.c &start[soffset + 3], 4) == 0) start 179 net/ipv4/ip_options.c dptr[0] = start[0]; start 195 net/ipv4/ip_tunnel_core.c unsigned int start; start 198 net/ipv4/ip_tunnel_core.c start = u64_stats_fetch_begin_irq(&tstats->syncp); start 203 net/ipv4/ip_tunnel_core.c } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); start 216 net/ipv4/ipconfig.c unsigned long start, next_msg; start 272 net/ipv4/ipconfig.c start = jiffies; start 273 net/ipv4/ipconfig.c next_msg = start + msecs_to_jiffies(20000); start 274 net/ipv4/ipconfig.c while (time_before(jiffies, start + start 287 net/ipv4/ipconfig.c elapsed = jiffies_to_msecs(jiffies - start); start 2963 net/ipv4/ipmr.c .start = ipmr_vif_seq_start, start 3023 net/ipv4/ipmr.c .start = ipmr_mfc_seq_start, start 613 net/ipv4/netfilter/arp_tables.c unsigned int start; start 617 net/ipv4/netfilter/arp_tables.c start = read_seqcount_begin(s); start 620 net/ipv4/netfilter/arp_tables.c } while (read_seqcount_retry(s, start)); start 754 net/ipv4/netfilter/ip_tables.c unsigned int start; start 758 net/ipv4/netfilter/ip_tables.c start = read_seqcount_begin(s); start 761 net/ipv4/netfilter/ip_tables.c } while (read_seqcount_retry(s, start)); start 739 net/ipv4/netfilter/ipt_CLUSTERIP.c .start = clusterip_seq_start, start 1011 net/ipv4/ping.c static struct sock *ping_get_first(struct seq_file *seq, int start) start 1017 net/ipv4/ping.c for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; start 1139 net/ipv4/ping.c .start = ping_v4_seq_start, start 1097 net/ipv4/raw.c .start = raw_seq_start, start 229 net/ipv4/route.c .start = rt_cache_seq_start, start 318 net/ipv4/route.c .start = rt_cpu_seq_start, start 62 net/ipv4/tcp_bpf.c i = msg_rx->sg.start; start 74 net/ipv4/tcp_bpf.c msg_rx->sg.start = i; start 103 net/ipv4/tcp_bpf.c msg_rx->sg.start = i; start 104 net/ipv4/tcp_bpf.c if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) { start 178 net/ipv4/tcp_bpf.c tmp->sg.start = msg->sg.start; start 179 net/ipv4/tcp_bpf.c i = msg->sg.start; start 205 net/ipv4/tcp_bpf.c msg->sg.start = i; start 229 net/ipv4/tcp_bpf.c sge = sk_msg_elem(msg, msg->sg.start); start 262 net/ipv4/tcp_bpf.c sk_msg_iter_next(msg, start); start 264 net/ipv4/tcp_bpf.c if (msg->sg.start == msg->sg.end) start 390 net/ipv4/tcp_bpf.c msg->sg.data[msg->sg.start].page_link && start 391 net/ipv4/tcp_bpf.c msg->sg.data[msg->sg.start].length) start 4914 net/ipv4/tcp_input.c struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) start 4928 net/ipv4/tcp_input.c if (!before(start, TCP_SKB_CB(skb)->end_seq)) { start 4942 net/ipv4/tcp_input.c before(TCP_SKB_CB(skb)->seq, start))) { start 4954 net/ipv4/tcp_input.c start = TCP_SKB_CB(skb)->end_seq; start 4962 net/ipv4/tcp_input.c while (before(start, end)) { start 4963 net/ipv4/tcp_input.c int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); start 4974 net/ipv4/tcp_input.c TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; start 4983 net/ipv4/tcp_input.c int offset = start - TCP_SKB_CB(skb)->seq; start 4984 net/ipv4/tcp_input.c int size = TCP_SKB_CB(skb)->end_seq - start; start 4993 net/ipv4/tcp_input.c start += size; start 4995 net/ipv4/tcp_input.c if (!before(start, TCP_SKB_CB(skb)->end_seq)) { start 5021 net/ipv4/tcp_input.c u32 start, end; start 5029 
net/ipv4/tcp_input.c start = TCP_SKB_CB(skb)->seq; start 5041 net/ipv4/tcp_input.c before(TCP_SKB_CB(skb)->end_seq, start)) { start 5044 net/ipv4/tcp_input.c end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { start 5046 net/ipv4/tcp_input.c head, skb, start, end); start 5056 net/ipv4/tcp_input.c if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) start 5057 net/ipv4/tcp_input.c start = TCP_SKB_CB(skb)->seq; start 2534 net/ipv4/tcp_ipv4.c .start = tcp_seq_start, start 2812 net/ipv4/udp.c static struct sock *udp_get_first(struct seq_file *seq, int start) start 2819 net/ipv4/udp.c for (state->bucket = start; state->bucket <= afinfo->udp_table->mask; start 2940 net/ipv4/udp.c .start = udp_seq_start, start 4349 net/ipv6/addrconf.c .start = if6_seq_start, start 572 net/ipv6/anycast.c .start = ac6_seq_start, start 727 net/ipv6/calipso.c static int calipso_genopt(unsigned char *buf, u32 start, u32 buf_len, start 738 net/ipv6/calipso.c pad = padding[start & 3]; start 739 net/ipv6/calipso.c if (buf_len <= start + pad + CALIPSO_HDR_LEN) start 750 net/ipv6/calipso.c buf + start + pad + len, start 751 net/ipv6/calipso.c buf_len - start - pad - len); start 757 net/ipv6/calipso.c calipso_pad_write(buf, start, pad); start 758 net/ipv6/calipso.c calipso = buf + start + pad; start 850 net/ipv6/calipso.c static int calipso_opt_find(struct ipv6_opt_hdr *hop, unsigned int *start, start 886 net/ipv6/calipso.c *start = offset_s + calipso_tlv_len(hop, offset_s); start 888 net/ipv6/calipso.c *start = sizeof(*hop); start 916 net/ipv6/calipso.c unsigned int start, end, buf_len, pad, hop_len; start 922 net/ipv6/calipso.c ret_val = calipso_opt_find(hop, &start, &end); start 927 net/ipv6/calipso.c start = sizeof(*hop); start 931 net/ipv6/calipso.c buf_len = hop_len + start - end + CALIPSO_OPT_LEN_MAX_WITH_PAD; start 936 net/ipv6/calipso.c if (start > sizeof(*hop)) start 937 net/ipv6/calipso.c memcpy(new, hop, start); start 938 net/ipv6/calipso.c ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, start 945 net/ipv6/calipso.c buf_len = start + ret_val; start 979 net/ipv6/calipso.c unsigned int start, end, delta, pad, hop_len; start 981 net/ipv6/calipso.c ret_val = calipso_opt_find(hop, &start, &end); start 986 net/ipv6/calipso.c if (start == sizeof(*hop) && end == hop_len) { start 992 net/ipv6/calipso.c delta = (end - start) & ~7; start 997 net/ipv6/calipso.c memcpy(*new, hop, start); start 999 net/ipv6/calipso.c pad = (end - start) & 7; start 1000 net/ipv6/calipso.c calipso_pad_write((unsigned char *)*new, start, pad); start 1002 net/ipv6/calipso.c memcpy((char *)*new + start + pad, (char *)hop + end, start 1307 net/ipv6/calipso.c unsigned int start, end; start 1312 net/ipv6/calipso.c ret_val = calipso_opt_find(hop, &start, &end); start 1316 net/ipv6/calipso.c start = 0; start 1321 net/ipv6/calipso.c ret_val = calipso_genopt(buf, start & 3, sizeof(buf), doi_def, secattr); start 1325 net/ipv6/calipso.c new_end = start + ret_val; start 1341 net/ipv6/calipso.c sizeof(*ip6_hdr) + start); start 1349 net/ipv6/calipso.c if (start == 0) { start 1358 net/ipv6/calipso.c memcpy((char *)hop + start, buf + (start & 3), new_end - start); start 1378 net/ipv6/calipso.c u32 old_hop_len, start = 0, end = 0, delta, size, pad; start 1392 net/ipv6/calipso.c ret_val = calipso_opt_find(old_hop, &start, &end); start 1396 net/ipv6/calipso.c if (start == sizeof(*old_hop) && end == old_hop_len) { start 1403 net/ipv6/calipso.c delta = (end - start) & ~7; start 1406 net/ipv6/calipso.c pad = (end - start) & 7; start 1407 
net/ipv6/calipso.c size = sizeof(*ip6_hdr) + start + pad; start 1408 net/ipv6/calipso.c calipso_pad_write((unsigned char *)old_hop, start, pad); start 72 net/ipv6/exthdrs_core.c int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, start 85 net/ipv6/exthdrs_core.c hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); start 91 net/ipv6/exthdrs_core.c start+offsetof(struct frag_hdr, start 108 net/ipv6/exthdrs_core.c start += hdrlen; start 112 net/ipv6/exthdrs_core.c return start; start 189 net/ipv6/exthdrs_core.c unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); start 202 net/ipv6/exthdrs_core.c start = *offset + sizeof(struct ipv6hdr); start 217 net/ipv6/exthdrs_core.c hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); start 224 net/ipv6/exthdrs_core.c rh = skb_header_pointer(skb, start, sizeof(_rh), start 241 net/ipv6/exthdrs_core.c start+offsetof(struct frag_hdr, start 273 net/ipv6/exthdrs_core.c start += hdrlen; start 277 net/ipv6/exthdrs_core.c *offset = start; start 39 net/ipv6/ila/ila_main.c .start = ila_xlat_nl_dump_start, start 2546 net/ipv6/ip6_fib.c .start = ipv6_route_seq_start, start 832 net/ipv6/ip6_flowlabel.c .start = ip6fl_seq_start, start 98 net/ipv6/ip6_tunnel.c unsigned int start; start 103 net/ipv6/ip6_tunnel.c start = u64_stats_fetch_begin_irq(&tstats->syncp); start 108 net/ipv6/ip6_tunnel.c } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); start 133 net/ipv6/ip6_tunnel.c #define for_each_ip6_tunnel_rcu(start) \ start 134 net/ipv6/ip6_tunnel.c for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) start 74 net/ipv6/ip6_vti.c #define for_each_vti6_tunnel_rcu(start) \ start 75 net/ipv6/ip6_vti.c for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) start 447 net/ipv6/ip6mr.c .start = ip6mr_vif_seq_start, start 508 net/ipv6/ip6mr.c .start = ipmr_mfc_seq_start, start 2773 net/ipv6/mcast.c .start = igmp6_mc_seq_start, start 2914 net/ipv6/mcast.c .start = igmp6_mcf_seq_start, start 771 net/ipv6/netfilter/ip6_tables.c unsigned int start; start 775 net/ipv6/netfilter/ip6_tables.c start = read_seqcount_begin(s); start 778 net/ipv6/netfilter/ip6_tables.c } while (read_seqcount_retry(s, start)); start 395 net/ipv6/netfilter/nf_conntrack_reasm.c int start = netoff + sizeof(struct ipv6hdr); start 396 net/ipv6/netfilter/nf_conntrack_reasm.c int len = skb->len - start; start 414 net/ipv6/netfilter/nf_conntrack_reasm.c if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) start 422 net/ipv6/netfilter/nf_conntrack_reasm.c prev_nhoff = start; start 426 net/ipv6/netfilter/nf_conntrack_reasm.c start += hdrlen; start 434 net/ipv6/netfilter/nf_conntrack_reasm.c *fhoff = start; start 212 net/ipv6/ping.c .start = ping_v6_seq_start, start 1322 net/ipv6/raw.c .start = raw_seq_start, start 403 net/ipv6/seg6.c .start = seg6_genl_dumphmac_start, start 277 net/ipv6/sit.c #define for_each_prl_rcu(start) \ start 278 net/ipv6/sit.c for (prl = rcu_dereference(start); \ start 1983 net/ipv6/tcp_ipv6.c .start = tcp_seq_start, start 1626 net/ipv6/udp.c .start = udp_seq_start, start 237 net/kcm/kcmproc.c .start = kcm_seq_start, start 923 net/kcm/kcmsock.c goto start; start 955 net/kcm/kcmsock.c start: start 1918 net/key/af_key.c u32 start; start 1922 net/key/af_key.c start = reqid; start 1932 net/key/af_key.c } while (reqid != start); start 3799 net/key/af_key.c .start = pfkey_seq_start, start 435 net/l2tp/l2tp_core.c start: start 477 net/l2tp/l2tp_core.c goto start; start 261 net/l2tp/l2tp_debugfs.c .start = l2tp_dfs_seq_start, 
start 1624 net/l2tp/l2tp_ppp.c .start = pppol2tp_seq_start, start 69 net/lapb/lapb_out.c unsigned short modulus, start, end; start 72 net/lapb/lapb_out.c start = !skb_peek(&lapb->ack_queue) ? lapb->va : lapb->vs; start 76 net/lapb/lapb_out.c start != end && skb_peek(&lapb->write_queue)) { start 77 net/lapb/lapb_out.c lapb->vs = start; start 204 net/llc/llc_proc.c .start = llc_seq_start, start 211 net/llc/llc_proc.c .start = llc_seq_start, start 303 net/mac80211/debugfs_sta.c bool start, tx; start 331 net/mac80211/debugfs_sta.c start = true; start 335 net/mac80211/debugfs_sta.c start = false; start 345 net/mac80211/debugfs_sta.c if (!buf || !tx || !start) start 354 net/mac80211/debugfs_sta.c if (start) start 23 net/mac80211/driver-ops.c ret = local->ops->start(&local->hw); start 1982 net/mac80211/ieee80211_i.h u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, start 1986 net/mac80211/ieee80211_i.h static inline void ieee802_11_parse_elems(const u8 *start, size_t len, start 1992 net/mac80211/ieee80211_i.h ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0, start 1146 net/mac80211/iface.c unsigned int start; start 1151 net/mac80211/iface.c start = u64_stats_fetch_begin_irq(&tstats->syncp); start 1156 net/mac80211/iface.c } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); start 1594 net/mac80211/iface.c u64 mask, start, addr, val, inc; start 1683 net/mac80211/iface.c start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | start 1688 net/mac80211/iface.c val = (start & mask); start 1689 net/mac80211/iface.c addr = (start & ~mask) | (val & mask); start 1713 net/mac80211/iface.c addr = (start & ~mask) | (val & mask); start 1714 net/mac80211/iface.c } while (addr != start); start 516 net/mac80211/main.c if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || start 1625 net/mac80211/rx.c int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) start 1634 net/mac80211/rx.c if ((start && in_ps) || (!start && !in_ps)) start 1637 net/mac80211/rx.c if (start) start 2125 net/mac80211/sta_info.c unsigned int start; start 2128 net/mac80211/sta_info.c start = u64_stats_fetch_begin(&sta->rx_stats.syncp); start 2130 net/mac80211/sta_info.c } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start)); start 2167 net/mac80211/sta_info.c unsigned int start; start 2171 net/mac80211/sta_info.c start = u64_stats_fetch_begin(&rxstats->syncp); start 2173 net/mac80211/sta_info.c } while (u64_stats_fetch_retry(&rxstats->syncp, start)); start 72 net/mac80211/tdls.c struct sk_buff *skb, u16 start, u16 end, start 81 net/mac80211/tdls.c for (i = start; i <= end; i += spacing) { start 895 net/mac80211/util.c _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, start 907 net/mac80211/util.c for_each_element(elem, start, len) { start 1268 net/mac80211/util.c if (!for_each_element_completed(elem, start, len)) start 1274 net/mac80211/util.c static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, start 1287 net/mac80211/util.c for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { start 1311 net/mac80211/util.c profile_len = cfg80211_merge_profile(start, len, start 1342 net/mac80211/util.c u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, start 1352 net/mac80211/util.c elems->ie_start = start; start 1358 net/mac80211/util.c ieee802_11_find_bssid_profile(start, len, elems, start 1368 net/mac80211/util.c crc = _ieee802_11_parse_elems_crc(start, len, action, elems, filter, start 3626 net/mac80211/util.c 
s32 end = data->desc[i].start + data->desc[i].duration - (tsf + 1); start 3650 net/mac80211/util.c data->desc[i].start += skip * data->desc[i].interval; start 3671 net/mac80211/util.c cur = data->desc[i].start - tsf; start 3675 net/mac80211/util.c cur = data->desc[i].start + data->desc[i].duration - tsf; start 3713 net/mac80211/util.c s32 start; start 3719 net/mac80211/util.c start = data->desc[i].start - tsf; start 3720 net/mac80211/util.c if (start <= 0) start 3723 net/mac80211/util.c if (next_offset > start) start 3724 net/mac80211/util.c next_offset = start; start 3751 net/mac80211/util.c data->desc[i].start = le32_to_cpu(desc->start_time); start 36 net/mac802154/driver-ops.h ret = local->ops->start(&local->hw); start 54 net/mac802154/main.c !ops->start || !ops->stop || !ops->set_channel)) start 1077 net/mpls/af_mpls.c unsigned int start; start 1081 net/mpls/af_mpls.c start = u64_stats_fetch_begin(&p->syncp); start 1083 net/mpls/af_mpls.c } while (u64_stats_fetch_retry(&p->syncp, start)); start 1511 net/netfilter/ipset/ip_set_core.c .start = ip_set_dump_start, start 1323 net/netfilter/ipset/ip_set_hash_gen.h mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start) start 1328 net/netfilter/ipset/ip_set_hash_gen.h if (start) { start 592 net/netfilter/ipvs/ip_vs_app.c .start = ip_vs_app_seq_start, start 1142 net/netfilter/ipvs/ip_vs_conn.c .start = ip_vs_conn_seq_start, start 1208 net/netfilter/ipvs/ip_vs_conn.c .start = ip_vs_conn_seq_start, start 2196 net/netfilter/ipvs/ip_vs_ctl.c .start = ip_vs_info_seq_start, start 2250 net/netfilter/ipvs/ip_vs_ctl.c unsigned int start; start 2254 net/netfilter/ipvs/ip_vs_ctl.c start = u64_stats_fetch_begin_irq(&u->syncp); start 2260 net/netfilter/ipvs/ip_vs_ctl.c } while (u64_stats_fetch_retry_irq(&u->syncp, start)); start 3107 net/netfilter/ipvs/ip_vs_ctl.c int start = cb->args[0]; start 3115 net/netfilter/ipvs/ip_vs_ctl.c if (++idx <= start || (svc->ipvs != ipvs)) start 3126 net/netfilter/ipvs/ip_vs_ctl.c if (++idx <= start || (svc->ipvs != ipvs)) start 3318 net/netfilter/ipvs/ip_vs_ctl.c int start = cb->args[0]; start 3338 net/netfilter/ipvs/ip_vs_ctl.c if (++idx <= start) start 64 net/netfilter/ipvs/ip_vs_est.c unsigned int start; start 69 net/netfilter/ipvs/ip_vs_est.c start = u64_stats_fetch_begin(&s->syncp); start 75 net/netfilter/ipvs/ip_vs_est.c } while (u64_stats_fetch_retry(&s->syncp, start)); start 84 net/netfilter/ipvs/ip_vs_est.c start = u64_stats_fetch_begin(&s->syncp); start 90 net/netfilter/ipvs/ip_vs_est.c } while (u64_stats_fetch_retry(&s->syncp, start)); start 100 net/netfilter/ipvs/ip_vs_ftp.c __u16 af, char **start, char **end) start 160 net/netfilter/ipvs/ip_vs_ftp.c *start = s; start 168 net/netfilter/ipvs/ip_vs_ftp.c *start = s; start 253 net/netfilter/ipvs/ip_vs_ftp.c char *start, *end; start 285 net/netfilter/ipvs/ip_vs_ftp.c &start, &end) != 1) start 306 net/netfilter/ipvs/ip_vs_ftp.c &start, &end) != 1) start 376 net/netfilter/ipvs/ip_vs_ftp.c start - data, start 377 net/netfilter/ipvs/ip_vs_ftp.c end - start, start 423 net/netfilter/ipvs/ip_vs_ftp.c char *start, *end; start 497 net/netfilter/ipvs/ip_vs_ftp.c &start, &end) == 1) { start 511 net/netfilter/ipvs/ip_vs_ftp.c &start, &end) == 1) { start 96 net/netfilter/nf_conntrack_amanda.c unsigned int dataoff, start, stop, off, i; start 118 net/netfilter/nf_conntrack_amanda.c start = skb_find_text(skb, dataoff, skb->len, start 120 net/netfilter/nf_conntrack_amanda.c if (start == UINT_MAX) start 122 net/netfilter/nf_conntrack_amanda.c start += dataoff + 
search[SEARCH_CONNECT].len; start 124 net/netfilter/nf_conntrack_amanda.c stop = skb_find_text(skb, start, skb->len, start 128 net/netfilter/nf_conntrack_amanda.c stop += start; start 131 net/netfilter/nf_conntrack_amanda.c off = skb_find_text(skb, start, stop, search[i].ts); start 134 net/netfilter/nf_conntrack_amanda.c off += start + search[i].len; start 1018 net/netfilter/nf_conntrack_core.c tstamp->start = ktime_get_real_ns(); start 651 net/netfilter/nf_conntrack_expect.c .start = exp_seq_start, start 199 net/netfilter/nf_conntrack_ftp.c static int get_port(const char *data, int start, size_t dlen, char delim, start 205 net/netfilter/nf_conntrack_ftp.c for (i = start; i < dlen; i++) { start 300 net/netfilter/nf_conntrack_netlink.c if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start), start 1329 net/netfilter/nf_conntrack_netlink.c .start = ctnetlink_start, start 2080 net/netfilter/nf_conntrack_netlink.c tstamp->start = ktime_get_real_ns(); start 208 net/netfilter/nf_conntrack_sip.c const char *start = dptr; start 224 net/netfilter/nf_conntrack_sip.c dptr = start; start 242 net/netfilter/nf_conntrack_sip.c const char *start = dptr, *limit = dptr + datalen, *end; start 281 net/netfilter/nf_conntrack_sip.c *matchoff = dptr - start; start 373 net/netfilter/nf_conntrack_sip.c const char *start = dptr, *limit = dptr + datalen; start 415 net/netfilter/nf_conntrack_sip.c *matchoff = dptr - start; start 427 net/netfilter/nf_conntrack_sip.c *matchoff = dptr - start + shift; start 441 net/netfilter/nf_conntrack_sip.c const char *start = dptr, *limit = dptr + datalen; start 455 net/netfilter/nf_conntrack_sip.c *matchoff = dptr - start; start 544 net/netfilter/nf_conntrack_sip.c const char *start; start 551 net/netfilter/nf_conntrack_sip.c start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); start 552 net/netfilter/nf_conntrack_sip.c if (!start) start 554 net/netfilter/nf_conntrack_sip.c start += strlen(name); start 556 net/netfilter/nf_conntrack_sip.c end = ct_sip_header_search(start, limit, ";", strlen(";")); start 560 net/netfilter/nf_conntrack_sip.c *matchoff = start - dptr; start 561 net/netfilter/nf_conntrack_sip.c *matchlen = end - start; start 573 net/netfilter/nf_conntrack_sip.c const char *start, *end; start 579 net/netfilter/nf_conntrack_sip.c start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); start 580 net/netfilter/nf_conntrack_sip.c if (!start) start 583 net/netfilter/nf_conntrack_sip.c start += strlen(name); start 584 net/netfilter/nf_conntrack_sip.c if (!sip_parse_addr(ct, start, &end, addr, limit, delim)) start 586 net/netfilter/nf_conntrack_sip.c *matchoff = start - dptr; start 587 net/netfilter/nf_conntrack_sip.c *matchlen = end - start; start 600 net/netfilter/nf_conntrack_sip.c const char *start; start 607 net/netfilter/nf_conntrack_sip.c start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); start 608 net/netfilter/nf_conntrack_sip.c if (!start) start 611 net/netfilter/nf_conntrack_sip.c start += strlen(name); start 612 net/netfilter/nf_conntrack_sip.c *val = simple_strtoul(start, &end, 0); start 613 net/netfilter/nf_conntrack_sip.c if (start == end) start 616 net/netfilter/nf_conntrack_sip.c *matchoff = start - dptr; start 617 net/netfilter/nf_conntrack_sip.c *matchlen = end - start; start 734 net/netfilter/nf_conntrack_sip.c const char *start = dptr, *limit = dptr + datalen; start 762 net/netfilter/nf_conntrack_sip.c *matchoff = dptr - start; start 774 net/netfilter/nf_conntrack_sip.c *matchoff = 
dptr - start + shift; start 231 net/netfilter/nf_conntrack_standalone.c delta_time = st->time_now - tstamp->start; start 379 net/netfilter/nf_conntrack_standalone.c .start = ct_seq_start, start 458 net/netfilter/nf_conntrack_standalone.c .start = ct_cpu_seq_start, start 393 net/netfilter/nf_log.c .start = seq_start, start 301 net/netfilter/nf_synproxy_core.c .start = synproxy_cpu_seq_start, start 1631 net/netfilter/nf_tables_api.c struct nft_rule **start; start 2589 net/netfilter/nf_tables_api.c .start= nf_tables_dump_rules_start, start 3502 net/netfilter/nf_tables_api.c .start = nf_tables_dump_sets_start, start 4350 net/netfilter/nf_tables_api.c .start = nf_tables_dump_set_start, start 5463 net/netfilter/nf_tables_api.c .start = nf_tables_dump_obj_start, start 6144 net/netfilter/nf_tables_api.c .start = nf_tables_dump_flowtable_start, start 6658 net/netfilter/nf_tables_api.c kvfree(o->start); start 6671 net/netfilter/nf_tables_api.c old->start = rules; start 279 net/netfilter/nfnetlink_acct.c .start = nfnl_acct_start, start 1096 net/netfilter/nfnetlink_log.c .start = seq_start, start 1488 net/netfilter/nfnetlink_queue.c .start = seq_start, start 76 net/netfilter/nft_exthdr.c unsigned int start; start 84 net/netfilter/nft_exthdr.c start = sizeof(struct iphdr); start 94 net/netfilter/nft_exthdr.c if (skb_copy_bits(skb, start, opt->__data, optlen)) start 109 net/netfilter/nft_exthdr.c *offset = opt->srr + start; start 114 net/netfilter/nft_exthdr.c *offset = opt->rr + start; start 120 net/netfilter/nft_exthdr.c *offset = opt->router_alert + start; start 1522 net/netfilter/x_tables.c .start = xt_table_seq_start, start 1641 net/netfilter/x_tables.c .start = xt_match_seq_start, start 1675 net/netfilter/x_tables.c .start = xt_target_seq_start, start 1213 net/netfilter/xt_hashlimit.c .start = dl_seq_start, start 1220 net/netfilter/xt_hashlimit.c .start = dl_seq_start, start 1227 net/netfilter/xt_hashlimit.c .start = dl_seq_start, start 532 net/netfilter/xt_recent.c .start = recent_seq_start, start 807 net/netlabel/netlabel_kapi.c u32 start, start 812 net/netlabel/netlabel_kapi.c u32 spot = start; start 2342 net/netlink/af_netlink.c if (control->start) { start 2343 net/netlink/af_netlink.c ret = control->start(cb); start 2646 net/netlink/af_netlink.c .start = netlink_seq_start, start 125 net/netlink/genetlink.c int start = 0; start 131 net/netlink/genetlink.c if (start == 0) start 138 net/netlink/genetlink.c start); start 146 net/netlink/genetlink.c start = i; start 325 net/netlink/genetlink.c int start = GENL_START_ALLOC, end = GENL_MAX_ID; start 347 net/netlink/genetlink.c start = end = GENL_ID_CTRL; start 349 net/netlink/genetlink.c start = end = GENL_ID_PMCRAID; start 351 net/netlink/genetlink.c start = end = GENL_ID_VFS_DQUOT; start 366 net/netlink/genetlink.c start, end + 1, GFP_KERNEL); start 467 net/netlink/genetlink.c if (ops->start) { start 469 net/netlink/genetlink.c rc = ops->start(cb); start 566 net/netlink/genetlink.c .start = genl_lock_start, start 578 net/netlink/genetlink.c .start = ops->start, start 1305 net/netrom/af_netrom.c .start = nr_info_start, start 126 net/netrom/nr_out.c unsigned short start, end; start 137 net/netrom/nr_out.c start = (skb_peek(&nr->ack_queue) == NULL) ? 
nr->va : nr->vs; start 140 net/netrom/nr_out.c if (start == end) start 143 net/netrom/nr_out.c nr->vs = start; start 890 net/netrom/nr_route.c .start = nr_node_start, start 944 net/netrom/nr_route.c .start = nr_neigh_start, start 127 net/nfc/hci/llc.c return llc->ops->start(llc); start 21 net/nfc/hci/llc.h int (*start) (struct nfc_llc *llc); start 77 net/nfc/hci/llc_nop.c .start = llc_nop_start, start 826 net/nfc/hci/llc_shdlc.c .start = llc_shdlc_start, start 1714 net/openvswitch/conntrack.c struct nlattr *start; start 1716 net/openvswitch/conntrack.c start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT); start 1717 net/openvswitch/conntrack.c if (!start) start 1772 net/openvswitch/conntrack.c nla_nest_end(skb, start); start 1781 net/openvswitch/conntrack.c struct nlattr *start; start 1783 net/openvswitch/conntrack.c start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT); start 1784 net/openvswitch/conntrack.c if (!start) start 1820 net/openvswitch/conntrack.c nla_nest_end(skb, start); start 669 net/openvswitch/datapath.c unsigned int start; start 674 net/openvswitch/datapath.c start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); start 676 net/openvswitch/datapath.c } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); start 765 net/openvswitch/datapath.c struct nlattr *start; start 778 net/openvswitch/datapath.c start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS); start 779 net/openvswitch/datapath.c if (start) { start 787 net/openvswitch/datapath.c nla_nest_end(skb, start); start 792 net/openvswitch/datapath.c nla_nest_cancel(skb, start); start 162 net/openvswitch/flow.h unsigned short int start; start 93 net/openvswitch/flow_netlink.c size_t start = rounddown(offset, sizeof(long)); start 101 net/openvswitch/flow_netlink.c if (range->start == range->end) { start 102 net/openvswitch/flow_netlink.c range->start = start; start 107 net/openvswitch/flow_netlink.c if (range->start > start) start 108 net/openvswitch/flow_netlink.c range->start = start; start 1945 net/openvswitch/flow_netlink.c struct nlattr *start; start 1947 net/openvswitch/flow_netlink.c start = nla_nest_start_noflag(skb, OVS_KEY_ATTR_NSH); start 1948 net/openvswitch/flow_netlink.c if (!start) start 1962 net/openvswitch/flow_netlink.c nla_nest_end(skb, start); start 2420 net/openvswitch/flow_netlink.c int rem, start, err; start 2442 net/openvswitch/flow_netlink.c start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log); start 2443 net/openvswitch/flow_netlink.c if (start < 0) start 2444 net/openvswitch/flow_netlink.c return start; start 2471 net/openvswitch/flow_netlink.c add_nested_action_end(*sfa, start); start 2483 net/openvswitch/flow_netlink.c int start, err; start 2489 net/openvswitch/flow_netlink.c start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log); start 2490 net/openvswitch/flow_netlink.c if (start < 0) start 2491 net/openvswitch/flow_netlink.c return start; start 2505 net/openvswitch/flow_netlink.c add_nested_action_end(*sfa, start); start 2524 net/openvswitch/flow_netlink.c mask->range.start = mask->range.end = 0; start 2565 net/openvswitch/flow_netlink.c int err = 0, start, opts_type; start 2591 net/openvswitch/flow_netlink.c start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log); start 2592 net/openvswitch/flow_netlink.c if (start < 0) start 2593 net/openvswitch/flow_netlink.c return start; start 2632 net/openvswitch/flow_netlink.c add_nested_action_end(*sfa, start); start 2805 net/openvswitch/flow_netlink.c int start, len = key_len * 2; start 2810 
net/openvswitch/flow_netlink.c start = add_nested_action_start(sfa, start 2813 net/openvswitch/flow_netlink.c if (start < 0) start 2814 net/openvswitch/flow_netlink.c return start; start 2828 net/openvswitch/flow_netlink.c add_nested_action_end(*sfa, start); start 2873 net/openvswitch/flow_netlink.c int start, err; start 2893 net/openvswitch/flow_netlink.c start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CHECK_PKT_LEN, start 2895 net/openvswitch/flow_netlink.c if (start < 0) start 2896 net/openvswitch/flow_netlink.c return start; start 2934 net/openvswitch/flow_netlink.c add_nested_action_end(*sfa, start); start 3239 net/openvswitch/flow_netlink.c struct nlattr *start, *ac_start = NULL, *sample_arg; start 3244 net/openvswitch/flow_netlink.c start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SAMPLE); start 3245 net/openvswitch/flow_netlink.c if (!start) start 3268 net/openvswitch/flow_netlink.c nla_nest_cancel(skb, start); start 3271 net/openvswitch/flow_netlink.c nla_nest_end(skb, start); start 3280 net/openvswitch/flow_netlink.c struct nlattr *start; start 3283 net/openvswitch/flow_netlink.c start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CLONE); start 3284 net/openvswitch/flow_netlink.c if (!start) start 3290 net/openvswitch/flow_netlink.c nla_nest_cancel(skb, start); start 3292 net/openvswitch/flow_netlink.c nla_nest_end(skb, start); start 3300 net/openvswitch/flow_netlink.c struct nlattr *start, *ac_start = NULL; start 3305 net/openvswitch/flow_netlink.c start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN); start 3306 net/openvswitch/flow_netlink.c if (!start) start 3358 net/openvswitch/flow_netlink.c nla_nest_end(skb, start); start 3362 net/openvswitch/flow_netlink.c nla_nest_cancel(skb, start); start 3370 net/openvswitch/flow_netlink.c struct nlattr *start; start 3378 net/openvswitch/flow_netlink.c start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET); start 3379 net/openvswitch/flow_netlink.c if (!start) start 3388 net/openvswitch/flow_netlink.c nla_nest_end(skb, start); start 44 net/openvswitch/flow_table.c return range->end - range->start; start 50 net/openvswitch/flow_table.c int start = full ? 
0 : mask->range.start; start 52 net/openvswitch/flow_table.c const long *m = (const long *)((const u8 *)&mask->key + start); start 53 net/openvswitch/flow_table.c const long *s = (const long *)((const u8 *)src + start); start 54 net/openvswitch/flow_table.c long *d = (long *)((u8 *)dst + start); start 373 net/openvswitch/flow_table.c int key_start = range->start; start 412 net/openvswitch/flow_table.c return cmp_key(&flow->key, key, range->start, range->end); start 598 net/openvswitch/flow_table.c const u8 *a_ = (const u8 *)&a->key + a->range.start; start 599 net/openvswitch/flow_table.c const u8 *b_ = (const u8 *)&b->key + b->range.start; start 602 net/openvswitch/flow_table.c && (a->range.start == b->range.start) start 100 net/openvswitch/vport-internal_dev.c unsigned int start; start 105 net/openvswitch/vport-internal_dev.c start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); start 107 net/openvswitch/vport-internal_dev.c } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); start 715 net/packet/af_packet.c u8 *start, *end; start 717 net/packet/af_packet.c start = (u8 *)pbd1; start 720 net/packet/af_packet.c start += PAGE_SIZE; start 723 net/packet/af_packet.c for (; start < end; start += PAGE_SIZE) start 724 net/packet/af_packet.c flush_dcache_page(pgv_to_page(start)); start 736 net/packet/af_packet.c start = (u8 *)pbd1; start 737 net/packet/af_packet.c flush_dcache_page(pgv_to_page(start)); start 2378 net/packet/af_packet.c u8 *start, *end; start 2383 net/packet/af_packet.c for (start = h.raw; start < end; start += PAGE_SIZE) start 2384 net/packet/af_packet.c flush_dcache_page(pgv_to_page(start)); start 4458 net/packet/af_packet.c unsigned long start; start 4483 net/packet/af_packet.c start = vma->vm_start; start 4495 net/packet/af_packet.c err = vm_insert_page(vma, start, page); start 4498 net/packet/af_packet.c start += PAGE_SIZE; start 4618 net/packet/af_packet.c .start = packet_seq_start, start 611 net/phonet/socket.c .start = pn_sock_seq_start, start 780 net/phonet/socket.c .start = pn_res_seq_start, start 72 net/psample/psample.c int start = cb->args[0]; start 80 net/psample/psample.c if (idx < start) { start 164 net/rds/info.c unsigned long start; start 178 net/rds/info.c start = (unsigned long)optval; start 179 net/rds/info.c if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { start 188 net/rds/info.c nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) start 196 net/rds/info.c ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); start 217 net/rds/info.c iter.offset = start & (PAGE_SIZE - 1); start 390 net/rds/message.c size_t start; start 394 net/rds/message.c 1, &start); start 409 net/rds/message.c sg_set_page(sg, pages, copied, start); start 312 net/rfkill/input.c .start = rfkill_start, start 1422 net/rose/af_rose.c .start = rose_info_start, start 49 net/rose/rose_out.c unsigned short start, end; start 60 net/rose/rose_out.c start = (skb_peek(&rose->ack_queue) == NULL) ? 
rose->va : rose->vs; start 63 net/rose/rose_out.c if (start == end) start 66 net/rose/rose_out.c rose->vs = start; start 1149 net/rose/rose_route.c .start = rose_node_start, start 1220 net/rose/rose_route.c .start = rose_neigh_start, start 1292 net/rose/rose_route.c .start = rose_route_start, start 128 net/rxrpc/key.c token->kad->start = ntohl(xdr[4]); start 1170 net/rxrpc/key.c ENCODE(token->kad->start); start 123 net/rxrpc/proc.c .start = rxrpc_call_seq_start, start 206 net/rxrpc/proc.c .start = rxrpc_connection_seq_start, start 332 net/rxrpc/proc.c .start = rxrpc_peer_seq_start, start 430 net/sched/act_tunnel_key.c struct nlattr *start; start 432 net/sched/act_tunnel_key.c start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE); start 433 net/sched/act_tunnel_key.c if (!start) start 445 net/sched/act_tunnel_key.c nla_nest_cancel(skb, start); start 453 net/sched/act_tunnel_key.c nla_nest_end(skb, start); start 460 net/sched/act_tunnel_key.c struct nlattr *start; start 466 net/sched/act_tunnel_key.c start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS); start 467 net/sched/act_tunnel_key.c if (!start) start 476 net/sched/act_tunnel_key.c nla_nest_cancel(skb, start); start 480 net/sched/act_tunnel_key.c nla_nest_end(skb, start); start 68 net/sched/cls_flower.c unsigned short int start; start 133 net/sched/cls_flower.c return mask->range.end - mask->range.start; start 155 net/sched/cls_flower.c mask->range.start = rounddown(first, sizeof(long)); start 162 net/sched/cls_flower.c return (u8 *) key + mask->range.start; start 1291 net/sched/cls_flower.c mask->filter_ht_params.key_offset += mask->range.start; start 701 net/sched/sch_htb.c unsigned long start) start 707 net/sched/sch_htb.c unsigned long stop_at = start + 2; start 827 net/sched/sch_htb.c struct htb_class *cl, *start; start 832 net/sched/sch_htb.c start = cl = htb_lookup_leaf(hprio, prio); start 854 net/sched/sch_htb.c if (cl == start) /* fix start if we just deleted it */ start 855 net/sched/sch_htb.c start = next; start 869 net/sched/sch_htb.c } while (cl != start); start 971 net/sched/sch_taprio.c ktime_t *start) start 981 net/sched/sch_taprio.c *start = base; start 999 net/sched/sch_taprio.c *start = ktime_add_ns(base, (n + 1) * cycle); start 1023 net/sched/sch_taprio.c ktime_t start, struct sched_gate_list *new) start 1039 net/sched/sch_taprio.c start = min_t(ktime_t, start, expires); start 1041 net/sched/sch_taprio.c hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS); start 1415 net/sched/sch_taprio.c ktime_t start; start 1526 net/sched/sch_taprio.c err = taprio_get_start_time(sch, new_admin, &start); start 1532 net/sched/sch_taprio.c setup_txtime(q, new_admin, start); start 1546 net/sched/sch_taprio.c setup_first_close_time(q, new_admin, start); start 1551 net/sched/sch_taprio.c taprio_start_sched(sch, start, new_admin); start 278 net/sched/sch_teql.c struct Qdisc *start, *q; start 284 net/sched/sch_teql.c start = master->slaves; start 290 net/sched/sch_teql.c q = start; start 334 net/sched/sch_teql.c } while ((q = NEXT_SLAVE(q)) != start); start 90 net/sctp/objcnt.c .start = sctp_objcnt_seq_start, start 1210 net/sctp/outqueue.c ntohs(frags[i].gab.start) + 1)); start 1763 net/sctp/outqueue.c if (tsn_offset >= ntohs(frags[i].gab.start) && start 194 net/sctp/proc.c .start = sctp_eps_seq_start, start 286 net/sctp/proc.c .start = sctp_transport_seq_start, start 363 net/sctp/proc.c .start = sctp_transport_seq_start, start 31 net/sctp/tsnmap.c __u16 len, __u16 *start, __u16 *end); start 146 
net/sctp/tsnmap.c iter->start = map->cumulative_tsn_ack_point + 1; start 154 net/sctp/tsnmap.c __u16 *start, __u16 *end) start 160 net/sctp/tsnmap.c if (TSN_lte(map->max_tsn_seen, iter->start)) start 163 net/sctp/tsnmap.c offset = iter->start - map->base_tsn; start 178 net/sctp/tsnmap.c *start = start_ + 1; start 182 net/sctp/tsnmap.c iter->start = map->cumulative_tsn_ack_point + *end + 1; start 273 net/sctp/tsnmap.c __u16 len, __u16 *start, __u16 *end) start 286 net/sctp/tsnmap.c *start = i; start 289 net/sctp/tsnmap.c if (*start) { start 325 net/sctp/tsnmap.c __u16 start = 0, end = 0; start 328 net/sctp/tsnmap.c &start, start 331 net/sctp/tsnmap.c gabs[ngaps].start = htons(start); start 617 net/smc/smc_pnet.c .start = smc_pnet_dump_start start 1424 net/sunrpc/cache.c .start = cache_seq_start_rcu, start 80 net/sunrpc/debugfs.c .start = tasks_start, start 651 net/sunrpc/rpc_pipe.c int start, int eof) start 658 net/sunrpc/rpc_pipe.c for (i = start; i < eof; i++) { start 683 net/sunrpc/rpc_pipe.c int start, int eof) start 688 net/sunrpc/rpc_pipe.c __rpc_depopulate(parent, files, start, eof); start 694 net/sunrpc/rpc_pipe.c int start, int eof, start 702 net/sunrpc/rpc_pipe.c for (i = start; i < eof; i++) { start 728 net/sunrpc/rpc_pipe.c __rpc_depopulate(parent, files, start, eof); start 1419 net/sunrpc/svc_xprt.c .start = svc_pool_stats_start, start 863 net/sunrpc/xprt.c unsigned long start, now = jiffies; start 865 net/sunrpc/xprt.c start = xprt->stat.connect_start + xprt->reestablish_timeout; start 866 net/sunrpc/xprt.c if (time_after(start, now)) start 867 net/sunrpc/xprt.c return start - now; start 87 net/tipc/diag.c .start = tipc_dump_start, start 434 net/tipc/msg.c u32 start, pad; start 443 net/tipc/msg.c start = align(bsz); start 444 net/tipc/msg.c pad = start - bsz; start 454 net/tipc/msg.c if (unlikely(max < (start + msz))) start 461 net/tipc/msg.c skb_copy_to_linear_data_offset(skb, start, msg, msz); start 462 net/tipc/msg.c msg_set_size(bmsg, start + msz); start 174 net/tipc/netlink.c .start = tipc_dump_start, start 820 net/tls/tls_main.c struct nlattr *start; start 823 net/tls/tls_main.c start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS); start 824 net/tls/tls_main.c if (!start) start 854 net/tls/tls_main.c nla_nest_end(skb, start); start 859 net/tls/tls_main.c nla_nest_cancel(skb, start); start 48 net/tls/tls_sw.c int start = skb_headlen(skb); start 49 net/tls/tls_sw.c int i, chunk = start - offset; start 69 net/tls/tls_sw.c WARN_ON(start > offset + len); start 71 net/tls/tls_sw.c end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); start 82 net/tls/tls_sw.c start = end; start 89 net/tls/tls_sw.c WARN_ON(start > offset + len); start 91 net/tls/tls_sw.c end = start + frag_iter->len; start 96 net/tls/tls_sw.c ret = __skb_nsg(frag_iter, offset - start, chunk, start 106 net/tls/tls_sw.c start = end; start 488 net/tls/tls_sw.c size_t data_len, u32 start) start 493 net/tls/tls_sw.c struct scatterlist *sge = sk_msg_elem(msg_en, start); start 510 net/tls/tls_sw.c msg_en->sg.curr = start; start 569 net/tls/tls_sw.c i = msg_opl->sg.start; start 602 net/tls/tls_sw.c j = msg_npl->sg.start; start 640 net/tls/tls_sw.c j = msg_npl->sg.start; start 731 net/tls/tls_sw.c if (msg_pl->sg.end < msg_pl->sg.start) { start 732 net/tls/tls_sw.c sg_chain(&msg_pl->sg.data[msg_pl->sg.start], start 733 net/tls/tls_sw.c MAX_SKB_FRAGS - msg_pl->sg.start + 1, start 737 net/tls/tls_sw.c i = msg_pl->sg.start; start 744 net/tls/tls_sw.c i = msg_en->sg.start; start 2821 net/unix/af_unix.c .start = 
unix_seq_start, start 496 net/wireless/core.h static inline unsigned int elapsed_jiffies_msecs(unsigned long start) start 500 net/wireless/core.h if (end >= start) start 501 net/wireless/core.h return jiffies_to_msecs(end - start); start 503 net/wireless/core.h return jiffies_to_msecs(end + (ULONG_MAX - start) + 1); start 1893 net/wireless/nl80211.c long start; start 2536 net/wireless/nl80211.c if (++idx <= state->start) start 2578 net/wireless/nl80211.c state->start = idx; start 7105 net/wireless/nl80211.c int err, reg_idx, start = cb->args[2]; start 7109 net/wireless/nl80211.c if (cfg80211_regdomain && start == 0) { start 7124 net/wireless/nl80211.c if (++reg_idx <= start) start 8748 net/wireless/nl80211.c int start = cb->args[2], idx = 0; start 8767 net/wireless/nl80211.c if (start == 0) start 8773 net/wireless/nl80211.c if (++idx <= start) start 621 net/wireless/reg.c __be32 start, end, max_bw; start 871 net/wireless/reg.c be32_to_cpu(rule->start), be32_to_cpu(rule->end), start 908 net/wireless/reg.c if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) && start 965 net/wireless/reg.c rrule->freq_range.start_freq_khz = be32_to_cpu(rule->start); start 54 net/wireless/sysfs.c char *start = buf; start 63 net/wireless/sysfs.c return buf - start; start 123 net/wireless/wext-proc.c .start = wireless_dev_seq_start, start 139 net/x25/x25_out.c unsigned short start, end; start 164 net/x25/x25_out.c start = skb_peek(&x25->ack_queue) ? x25->vs : x25->va; start 167 net/x25/x25_out.c if (start == end) start 170 net/x25/x25_out.c x25->vs = start; start 149 net/x25/x25_proc.c .start = x25_seq_route_start, start 156 net/x25/x25_proc.c .start = x25_seq_socket_start, start 163 net/x25/x25_proc.c .start = x25_seq_forward_start, start 56 net/xfrm/xfrm_interface.c #define for_each_xfrmi_rcu(start, xi) \ start 57 net/xfrm/xfrm_interface.c for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next)) start 546 net/xfrm/xfrm_interface.c int start; start 550 net/xfrm/xfrm_interface.c start = u64_stats_fetch_begin_irq(&stats->syncp); start 555 net/xfrm/xfrm_interface.c } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); start 43 net/xfrm/xfrm_ipcomp.c const u8 *start = skb->data; start 47 net/xfrm/xfrm_ipcomp.c int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); start 139 net/xfrm/xfrm_ipcomp.c u8 *start = skb->data; start 147 net/xfrm/xfrm_ipcomp.c err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); start 156 net/xfrm/xfrm_ipcomp.c memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); start 3240 net/xfrm/xfrm_policy.c xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, start 3243 net/xfrm/xfrm_policy.c int idx = start; start 3247 net/xfrm/xfrm_policy.c return start; start 3249 net/xfrm/xfrm_policy.c start = -1; start 3254 net/xfrm/xfrm_policy.c if (start == -1) start 3255 net/xfrm/xfrm_policy.c start = -2-idx; start 3259 net/xfrm/xfrm_policy.c return start; start 2598 net/xfrm/xfrm_user.c int (*start)(struct netlink_callback *); start 2612 net/xfrm/xfrm_user.c .start = xfrm_dump_policy_start, start 2663 net/xfrm/xfrm_user.c .start = link->start, start 32 samples/bpf/offwaketime_kern.c struct bpf_map_def SEC("maps") start = { start 133 samples/bpf/offwaketime_kern.c bpf_map_update_elem(&start, &pid, &ts, BPF_ANY); start 137 samples/bpf/offwaketime_kern.c tsp = bpf_map_lookup_elem(&start, &pid); start 143 samples/bpf/offwaketime_kern.c bpf_map_delete_elem(&start, &pid); start 80 samples/vfio-mdev/mtty.c u64 start; start 629 
samples/vfio-mdev/mtty.c mdev_state->region_info[index].start = ((u64)start_hi << 32) | start 673 samples/vfio-mdev/mtty.c if (!mdev_state->region_info[index].start) start 916 samples/vfio-mdev/mtty.c unsigned int index, unsigned int start, start 1237 samples/vfio-mdev/mtty.c ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start, start 349 scripts/asn1_compiler.c char *line, *nl, *start, *p, *q; start 408 scripts/asn1_compiler.c start = p; start 428 scripts/asn1_compiler.c memcpy(tokens[tix].content, start, tokens[tix].size); start 468 scripts/asn1_compiler.c memcpy(tokens[tix].content, start, tokens[tix].size); start 245 scripts/basic/fixdep.c const char *start = p; start 248 scripts/basic/fixdep.c if (p > start && (isalnum(p[-1]) || p[-1] == '_')) { start 249 scripts/dtc/dtc.h struct reserve_info *build_reserve_entry(uint64_t start, uint64_t len); start 51 scripts/dtc/libfdt/fdt_wip.c static void fdt_nop_region_(void *start, int len) start 55 scripts/dtc/libfdt/fdt_wip.c for (p = start; (char *)p < ((char *)start + len); p++) start 170 scripts/gcc-plugins/randomize_layout_plugin.c unsigned long start; start 188 scripts/gcc-plugins/randomize_layout_plugin.c size_groups[group_idx].start = i; start 227 scripts/gcc-plugins/randomize_layout_plugin.c for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) { start 58 scripts/genksyms/genksyms.c static struct string_list *concat_list(struct string_list *start, ...); start 350 scripts/genksyms/genksyms.c static struct string_list *concat_list(struct string_list *start, ...) start 355 scripts/genksyms/genksyms.c if (!start) start 357 scripts/genksyms/genksyms.c for (va_start(ap, start); (n = va_arg(ap, struct string_list *));) { start 360 scripts/genksyms/genksyms.c n2->next = start; start 361 scripts/genksyms/genksyms.c start = n; start 364 scripts/genksyms/genksyms.c return start; start 378 scripts/genksyms/genksyms.c struct string_list *copy_list_range(struct string_list *start, start 383 scripts/genksyms/genksyms.c if (start == end) start 385 scripts/genksyms/genksyms.c n = res = copy_node(start); start 386 scripts/genksyms/genksyms.c for (start = start->next; start != end; start = start->next) { start 387 scripts/genksyms/genksyms.c n->next = copy_node(start); start 61 scripts/genksyms/genksyms.h struct string_list *copy_list_range(struct string_list *start, start 80 scripts/insert-sys-cert.c unsigned long start = x[i].sh_addr; start 81 scripts/insert-sys-cert.c unsigned long end = start + x[i].sh_size; start 84 scripts/insert-sys-cert.c if (addr >= start && addr <= end) start 85 scripts/insert-sys-cert.c return addr - start + offset; start 43 scripts/kallsyms.c unsigned long long start, end; start 99 scripts/kallsyms.c ar->start = addr; start 186 scripts/kallsyms.c if (s->addr >= ar->start && s->addr <= ar->end) start 368 scripts/kconfig/gconf.c GtkTextIter start, end; start 375 scripts/kconfig/gconf.c gtk_text_buffer_get_bounds(buffer, &start, &end); start 376 scripts/kconfig/gconf.c gtk_text_buffer_delete(buffer, &start, &end); start 393 scripts/kconfig/gconf.c GtkTextIter start, end; start 397 scripts/kconfig/gconf.c gtk_text_buffer_get_bounds(buffer, &start, &end); start 398 scripts/kconfig/gconf.c gtk_text_buffer_delete(buffer, &start, &end); start 217 scripts/kconfig/lxdialog/dialog.h typedef void (*update_text_fn)(char *buf, size_t start, size_t end, void start 363 scripts/kconfig/mconf.c static void update_text(char *buf, size_t start, size_t end, void *_data) start 370 scripts/kconfig/mconf.c if 
(pos->offset >= start && pos->offset < end) { start 1869 scripts/mod/modpost.c Elf_Rela *start = (void *)elf->hdr + sechdr->sh_offset; start 1870 scripts/mod/modpost.c Elf_Rela *stop = (void *)start + sechdr->sh_size; start 1878 scripts/mod/modpost.c for (rela = start; rela < stop; rela++) { start 1900 scripts/mod/modpost.c if (is_second_extable_reloc(start, rela, fromsec)) start 1915 scripts/mod/modpost.c Elf_Rel *start = (void *)elf->hdr + sechdr->sh_offset; start 1916 scripts/mod/modpost.c Elf_Rel *stop = (void *)start + sechdr->sh_size; start 1924 scripts/mod/modpost.c for (rel = start; rel < stop; rel++) { start 1960 scripts/mod/modpost.c if (is_second_extable_reloc(start, rel, fromsec)) start 621 security/apparmor/apparmorfs.c state = aa_dfa_match_len(dfa, profile->file.start, start 632 security/apparmor/apparmorfs.c state = aa_dfa_match_len(dfa, profile->policy.start[0], start 2176 security/apparmor/apparmorfs.c .start = p_start, start 61 security/apparmor/crypto.c int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, start 86 security/apparmor/crypto.c error = crypto_shash_update(desc, (u8 *) start, len); start 194 security/apparmor/domain.c unsigned int start, bool subns, u32 request, start 207 security/apparmor/domain.c state = match_component(profile, tp, stack, start); start 223 security/apparmor/domain.c state = match_component(profile, tp, stack, start); start 288 security/apparmor/domain.c u32 request, unsigned int start, start 298 security/apparmor/domain.c return label_match(profile, target, stack, start, true, request, perms); start 626 security/apparmor/domain.c unsigned int state = profile->file.start; start 748 security/apparmor/domain.c unsigned int state = profile->file.start; start 1288 security/apparmor/domain.c profile->file.start, perms); start 266 security/apparmor/file.c unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start, start 271 security/apparmor/file.c state = aa_dfa_match(dfa, start, name); start 285 security/apparmor/file.c aa_str_perms(profile->file.dfa, profile->file.start, name, cond, perms); start 390 security/apparmor/file.c state = aa_str_perms(profile->file.dfa, profile->file.start, lname, start 420 security/apparmor/file.c aa_str_perms(profile->file.dfa, profile->file.start, tname, cond, start 18 security/apparmor/include/crypto.h int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, start 26 security/apparmor/include/crypto.h void *start, size_t len) start 176 security/apparmor/include/file.h unsigned int start; start 185 security/apparmor/include/file.h unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start, start 81 security/apparmor/include/lib.h unsigned int start) start 84 security/apparmor/include/lib.h return aa_dfa_next(dfa, start, 0); start 124 security/apparmor/include/match.h unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, start 126 security/apparmor/include/match.h unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start, start 130 security/apparmor/include/match.h unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start, start 132 security/apparmor/include/match.h unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start, start 153 security/apparmor/include/match.h unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start, start 76 security/apparmor/include/policy.h unsigned int start[AA_CLASS_LAST + 1]; start 220 security/apparmor/include/policy.h return profile->policy.start[class]; start 
223 security/apparmor/include/policy.h profile->policy.start[0], &class, 1); start 201 security/apparmor/ipc.c profile->policy.start[AA_CLASS_SIGNAL], start 1332 security/apparmor/label.c struct aa_label *label, unsigned int start, start 1345 security/apparmor/label.c state = match_component(profile, tp, start); start 1361 security/apparmor/label.c state = match_component(profile, tp, start); start 388 security/apparmor/lib.c profile->policy.start[AA_CLASS_LABEL], start 400 security/apparmor/match.c unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, start 407 security/apparmor/match.c unsigned int state = start; start 440 security/apparmor/match.c unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start, start 447 security/apparmor/match.c unsigned int state = start; start 511 security/apparmor/match.c unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start, start 519 security/apparmor/match.c unsigned int state = start, pos; start 572 security/apparmor/match.c unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start, start 580 security/apparmor/match.c unsigned int state = start, pos; start 647 security/apparmor/match.c static unsigned int leftmatch_fb(struct aa_dfa *dfa, unsigned int start, start 655 security/apparmor/match.c unsigned int state = start, pos; start 728 security/apparmor/match.c unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start, start 735 security/apparmor/match.c return leftmatch_fb(dfa, start, str, &wb, count); start 239 security/apparmor/mount.c static int do_match_mnt(struct aa_dfa *dfa, unsigned int start, start 249 security/apparmor/mount.c state = aa_dfa_match(dfa, start, mntpnt); start 345 security/apparmor/mount.c profile->policy.start[AA_CLASS_MOUNT], start 573 security/apparmor/mount.c profile->policy.start[AA_CLASS_MOUNT], start 639 security/apparmor/mount.c profile->policy.start[AA_CLASS_MOUNT], start 70 security/apparmor/policy_unpack.c void *start; start 111 security/apparmor/policy_unpack.c aad(&sa)->iface.pos = e->pos - e->start; start 452 security/apparmor/policy_unpack.c size_t sz = blob - (char *) e->start - start 453 security/apparmor/policy_unpack.c ((e->pos - e->start) & 7); start 831 security/apparmor/policy_unpack.c if (!unpack_u32(e, &profile->policy.start[0], "start")) start 833 security/apparmor/policy_unpack.c profile->policy.start[0] = DFA_START; start 836 security/apparmor/policy_unpack.c profile->policy.start[i] = start 838 security/apparmor/policy_unpack.c profile->policy.start[0], start 854 security/apparmor/policy_unpack.c if (!unpack_u32(e, &profile->file.start, "dfa_start")) start 856 security/apparmor/policy_unpack.c profile->file.start = DFA_START; start 858 security/apparmor/policy_unpack.c profile->policy.start[AA_CLASS_FILE]) { start 860 security/apparmor/policy_unpack.c profile->file.start = profile->policy.start[AA_CLASS_FILE]; start 1061 security/apparmor/policy_unpack.c .start = udata->data, start 1069 security/apparmor/policy_unpack.c void *start; start 1070 security/apparmor/policy_unpack.c error = verify_header(&e, e.pos == e.start, ns); start 1074 security/apparmor/policy_unpack.c start = e.pos; start 1086 security/apparmor/policy_unpack.c error = aa_calc_profile_hash(profile, e.version, start, start 1087 security/apparmor/policy_unpack.c e.pos - start); start 191 security/integrity/ima/ima_fs.c .start = ima_measurements_start, start 257 security/integrity/ima/ima_fs.c .start = ima_measurements_start, start 373 security/integrity/ima/ima_fs.c .start 
= ima_policy_start, start 22 security/keys/proc.c .start = proc_keys_start, start 34 security/keys/proc.c .start = proc_key_users_start, start 1551 security/selinux/selinuxfs.c .start = sel_avc_stats_seq_start, start 631 security/smack/smackfs.c .start = load2_seq_start, start 801 security/smack/smackfs.c .start = cipso_seq_start, start 984 security/smack/smackfs.c .start = cipso_seq_start, start 1060 security/smack/smackfs.c .start = net4addr_seq_start, start 1324 security/smack/smackfs.c .start = net6addr_seq_start, start 1884 security/smack/smackfs.c .start = onlycap_seq_start, start 2217 security/smack/smackfs.c .start = load_self_seq_start, start 2348 security/smack/smackfs.c .start = load2_seq_start, start 2425 security/smack/smackfs.c .start = load_self2_seq_start, start 2693 security/smack/smackfs.c .start = relabel_self_seq_start, start 1983 security/tomoyo/common.c char *start = str; start 1988 security/tomoyo/common.c return strlen(start) + 1; start 2836 security/tomoyo/common.c char *start = ""; start 2840 security/tomoyo/common.c start = tomoyo_builtin_profile; start 2845 security/tomoyo/common.c start = tomoyo_builtin_exception_policy; start 2850 security/tomoyo/common.c start = tomoyo_builtin_domain_policy; start 2855 security/tomoyo/common.c start = tomoyo_builtin_manager; start 2860 security/tomoyo/common.c start = tomoyo_builtin_stat; start 2866 security/tomoyo/common.c char *end = strchr(start, '\n'); start 2871 security/tomoyo/common.c tomoyo_normalize_line(start); start 2872 security/tomoyo/common.c head.write_buf = start; start 2873 security/tomoyo/common.c tomoyo_parse_policy(&head, start); start 2874 security/tomoyo/common.c start = end + 1; start 259 security/tomoyo/condition.c static const struct tomoyo_path_info *tomoyo_get_dqword(char *start) start 261 security/tomoyo/condition.c char *cp = start + strlen(start) - 1; start 263 security/tomoyo/condition.c if (cp == start || *start++ != '"' || *cp != '"') start 266 security/tomoyo/condition.c if (*start && !tomoyo_correct_word(start)) start 268 security/tomoyo/condition.c return tomoyo_get_name(start); start 156 security/tomoyo/util.c char *start = param->data; start 157 security/tomoyo/util.c char *pos = start; start 167 security/tomoyo/util.c if (tomoyo_correct_domain(start)) start 168 security/tomoyo/util.c return tomoyo_get_name(start); start 435 security/tomoyo/util.c const char *const start = string; start 464 security/tomoyo/util.c if (string - 3 < start || *(string - 3) != '/') start 143 sound/aoa/soundbus/i2sbus/core.c res->start += reg[index * 2]; start 144 sound/aoa/soundbus/i2sbus/core.c res->end = res->start + reg[index * 2 + 1] - 1; start 261 sound/aoa/soundbus/i2sbus/core.c request_mem_region(dev->resources[i].start, start 274 sound/aoa/soundbus/i2sbus/core.c dev->intfregs = ioremap(r->start, rlen); start 280 sound/aoa/soundbus/i2sbus/core.c dev->out.dbdma = ioremap(r->start, rlen); start 286 sound/aoa/soundbus/i2sbus/core.c dev->in.dbdma = ioremap(r->start, rlen); start 557 sound/aoa/soundbus/i2sbus/pcm.c if (cii->codec->start) start 558 sound/aoa/soundbus/i2sbus/pcm.c cii->codec->start(cii, pi->substream); start 125 sound/aoa/soundbus/soundbus.h int (*start)(struct codec_info_item *cii, start 213 sound/arm/aaci.c if (!aacirun->substream || !aacirun->start) { start 251 sound/arm/aaci.c ptr = aacirun->start; start 273 sound/arm/aaci.c if (!aacirun->substream || !aacirun->start) { start 311 sound/arm/aaci.c ptr = aacirun->start; start 549 sound/arm/aaci.c aacirun->start = runtime->dma_area; start 550 
sound/arm/aaci.c aacirun->end = aacirun->start + snd_pcm_lib_buffer_bytes(substream); start 551 sound/arm/aaci.c aacirun->ptr = aacirun->start; start 561 sound/arm/aaci.c ssize_t bytes = aacirun->ptr - aacirun->start; start 910 sound/arm/aaci.c (unsigned long long)dev->res.start, dev->irq[0]); start 1000 sound/arm/aaci.c aaci->base = ioremap(dev->res.start, resource_size(&dev->res)); start 215 sound/arm/aaci.h void *start; start 775 sound/atmel/ac97c.c chip->regs = ioremap(regs->start, resource_size(regs)); start 120 sound/core/hrtimer.c .start = snd_hrtimer_start, start 84 sound/core/pcm_timer.c .start = snd_pcm_timer_start, start 144 sound/core/sgbuf.c unsigned int start, end, pg; start 146 sound/core/sgbuf.c start = ofs >> PAGE_SHIFT; start 149 sound/core/sgbuf.c pg = sg->table[start].addr >> PAGE_SHIFT; start 151 sound/core/sgbuf.c start++; start 152 sound/core/sgbuf.c if (start > end) start 155 sound/core/sgbuf.c if ((sg->table[start].addr >> PAGE_SHIFT) != pg) start 156 sound/core/sgbuf.c return (start << PAGE_SHIFT) - ofs; start 501 sound/core/timer.c bool start, unsigned long ticks) start 526 sound/core/timer.c if (start) start 540 sound/core/timer.c if (start) start 542 sound/core/timer.c timer->hw.start(timer); start 548 sound/core/timer.c snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START : start 557 sound/core/timer.c bool start) start 576 sound/core/timer.c snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START : start 618 sound/core/timer.c timer->hw.start(timer); start 883 sound/core/timer.c timer->hw.start(timer); start 994 sound/core/timer.c if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop)) start 1179 sound/core/timer.c .start = snd_timer_s_start, start 93 sound/drivers/dummy.c int (*start)(struct snd_pcm_substream *); start 347 sound/drivers/dummy.c .start = dummy_systimer_start, start 471 sound/drivers/dummy.c .start = dummy_hrtimer_start, start 487 sound/drivers/dummy.c return get_dummy_ops(substream)->start(substream); start 1126 sound/drivers/ml403-ac97cr.c ml403_ac97cr->port = ioremap_nocache(resource->start, start 1128 sound/drivers/ml403-ac97cr.c (resource->start) + 1); start 222 sound/drivers/opl3/opl3_lib.c .start = snd_opl3_timer1_start, start 231 sound/drivers/opl3/opl3_lib.c .start = snd_opl3_timer2_start, start 64 sound/firewire/digi00x/digi00x-transaction.c .start = 0xffffe0000000ull, start 386 sound/firewire/fcp.c .start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE, start 152 sound/firewire/fireface/ff-transaction.c midi_msg_region.start = 0x000100000000ull * i; start 153 sound/firewire/fireface/ff-transaction.c midi_msg_region.end = midi_msg_region.start + ff->async_handler.length; start 312 sound/firewire/fireworks/fireworks_transaction.c .start = MEMORY_SPACE_EFW_RESPONSE, start 100 sound/firewire/motu/motu-transaction.c .start = 0xffffe0000000ull, start 295 sound/firewire/tascam/tascam-transaction.c .start = 0xffffe0000000ull, start 621 sound/hda/hdac_stream.c void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start, start 633 sound/hda/hdac_stream.c if (start) { start 725 sound/hda/hdac_stream.c void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start) start 727 sound/hda/hdac_stream.c if (start) start 159 sound/i2c/i2c.c if (bus->hw_ops.bit->start) start 160 sound/i2c/i2c.c bus->hw_ops.bit->start(bus); start 430 sound/isa/ad1816a/ad1816a_lib.c .start = snd_ad1816a_timer_start, start 116 sound/isa/gus/gus_timer.c .start = snd_gf1_timer1_start, start 125 sound/isa/gus/gus_timer.c .start = snd_gf1_timer2_start, start 61 
sound/isa/gus/gus_volume.c unsigned short start, start 73 sound/isa/gus/gus_volume.c start >>= 4; start 75 sound/isa/gus/gus_volume.c if (start < end) start 76 sound/isa/gus/gus_volume.c us /= end - start; start 78 sound/isa/gus/gus_volume.c us /= start - end; start 44 sound/isa/msnd/msnd.c void snd_msnd_init_queue(void __iomem *base, int start, int size) start 46 sound/isa/msnd/msnd.c writew(PCTODSP_BASED(start), base + JQS_wStart); start 295 sound/isa/msnd/msnd.c int snd_msnd_DAPQ(struct snd_msnd *chip, int start) start 298 sound/isa/msnd/msnd.c int protect = start, nbanks = 0; start 305 sound/isa/msnd/msnd.c while (DAPQ_tail != readw(chip->DAPQ + JQS_wHead) || start) { start 308 sound/isa/msnd/msnd.c if (start) { start 309 sound/isa/msnd/msnd.c start = 0; start 273 sound/isa/msnd/msnd.h void snd_msnd_init_queue(void __iomem *base, int start, int size); start 285 sound/isa/msnd/msnd.h int snd_msnd_DAPQ(struct snd_msnd *chip, int start); start 286 sound/isa/msnd/msnd.h int snd_msnd_DARQ(struct snd_msnd *chip, int start); start 280 sound/isa/sb/emu8000_callback.c addr = vp->reg.start - 1; start 181 sound/isa/sb/emu8000_patch.c sp->v.end -= sp->v.start; start 182 sound/isa/sb/emu8000_patch.c sp->v.loopstart -= sp->v.start; start 183 sound/isa/sb/emu8000_patch.c sp->v.loopend -= sp->v.start; start 184 sound/isa/sb/emu8000_patch.c sp->v.start = 0; start 258 sound/isa/sb/emu8000_patch.c sp->v.start += dram_start; start 972 sound/isa/wss/wss_lib.c .start = snd_wss_timer_start, start 1171 sound/oss/dmasound/dmasound_atari.c char *start, *end; start 1176 sound/oss/dmasound/dmasound_atari.c start = write_sq.buffers[write_sq.front]; start 1177 sound/oss/dmasound/dmasound_atari.c end = start+((write_sq.count == index) ? write_sq.rear_size start 1181 sound/oss/dmasound/dmasound_atari.c DMASNDSetBase(virt_to_phys(start)); start 464 sound/oss/dmasound/dmasound_paula.c u_char *start, *ch0, *ch1, *ch2, *ch3; start 470 sound/oss/dmasound/dmasound_paula.c start = write_sq.buffers[write_sq.front]; start 475 sound/oss/dmasound/dmasound_paula.c ch0 = start; start 476 sound/oss/dmasound/dmasound_paula.c ch1 = start+write_sq_block_size_half; start 479 sound/oss/dmasound/dmasound_paula.c ch0 = start; start 480 sound/oss/dmasound/dmasound_paula.c ch1 = start; start 403 sound/oss/dmasound/dmasound_q40.c u_char *start; start 411 sound/oss/dmasound/dmasound_q40.c start = write_sq.buffers[write_sq.front]; start 414 sound/oss/dmasound/dmasound_q40.c q40_pp=start; start 906 sound/parisc/harmony.c h->hpa = padev->hpa.start; start 910 sound/parisc/harmony.c h->iobase = ioremap_nocache(padev->hpa.start, HARMONY_SIZE); start 913 sound/parisc/harmony.c (unsigned long)padev->hpa.start); start 153 sound/pci/ali5451/ali5451.c unsigned int start; start 161 sound/pci/ali5451/ali5451.c unsigned int start; start 2143 sound/pci/ali5451/ali5451.c codec->chregs.regs.start = ALI_START; start 2148 sound/pci/ali5451/ali5451.c codec->chregs.data.start = 0x00; start 2231 sound/pci/azt3328.c .start = snd_azf3328_timer_start, start 22 sound/pci/ctxfi/cttimer.c void (*start)(struct ct_timer_instance *); start 131 sound/pci/ctxfi/cttimer.c .start = ct_systimer_start, start 323 sound/pci/ctxfi/cttimer.c .start = ct_xfitimer_start, start 368 sound/pci/ctxfi/cttimer.c atimer->ops->start(ti); start 331 sound/pci/emu10k1/emu10k1_callback.c vp->reg.start += mapped_offset; start 439 sound/pci/emu10k1/emu10k1_callback.c addr = vp->reg.start; start 46 sound/pci/emu10k1/emu10k1_patch.c sp->v.end -= sp->v.start; start 47 
sound/pci/emu10k1/emu10k1_patch.c sp->v.loopstart -= sp->v.start; start 48 sound/pci/emu10k1/emu10k1_patch.c sp->v.loopend -= sp->v.start; start 49 sound/pci/emu10k1/emu10k1_patch.c sp->v.start = 0; start 188 sound/pci/emu10k1/emu10k1_patch.c sp->v.start += start_addr; start 59 sound/pci/emu10k1/timer.c .start = snd_emu10k1_timer_start, start 213 sound/pci/hda/hda_controller.c bool start; start 233 sound/pci/hda/hda_controller.c start = true; start 238 sound/pci/hda/hda_controller.c start = false; start 261 sound/pci/hda/hda_controller.c if (start) { start 270 sound/pci/hda/hda_controller.c snd_hdac_stream_sync(hstr, start, sbits); start 275 sound/pci/hda/hda_controller.c if (start) start 1056 sound/pci/hda/hda_controller.c void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start) start 1062 sound/pci/hda/hda_controller.c snd_hdac_dsp_trigger(azx_stream(azx_dev), start); start 248 sound/pci/hda/hda_tegra.c bus->addr = res->start + HDA_BAR0; start 118 sound/pci/ice1712/ews.c .start = ewx_i2c_start, start 538 sound/pci/ice1712/ice1724.c unsigned int start; /* start & pause bit */ start 553 sound/pci/ice1712/ice1724.c what |= reg->start; start 859 sound/pci/ice1712/ice1724.c if (!(inl(ICEMT1724(ice, DMA_CONTROL)) & reg->start)) start 886 sound/pci/ice1712/ice1724.c .start = VT1724_PDMA0_START, start 893 sound/pci/ice1712/ice1724.c .start = VT1724_PDMA4_START, start 900 sound/pci/ice1712/ice1724.c .start = VT1724_RDMA0_START, start 907 sound/pci/ice1712/ice1724.c .start = VT1724_RDMA1_START, start 1365 sound/pci/ice1712/ice1724.c .start = VT1724_PDMA1_START, start 1371 sound/pci/ice1712/ice1724.c .start = VT1724_PDMA2_START, start 1377 sound/pci/ice1712/ice1724.c .start = VT1724_PDMA3_START, start 137 sound/pci/ice1712/revo.c .start = revo_i2c_start, start 509 sound/pci/lola/lola_pcm.c unsigned int start; start 517 sound/pci/lola/lola_pcm.c start = 1; start 522 sound/pci/lola/lola_pcm.c start = 0; start 532 sound/pci/lola/lola_pcm.c sync_streams = (start && snd_pcm_stream_linked(substream)); start 539 sound/pci/lola/lola_pcm.c if (start) start 543 sound/pci/lola/lola_pcm.c str->running = start; start 544 sound/pci/lola/lola_pcm.c str->paused = !start; start 60 sound/pci/mixart/mixart.c struct mixart_pipe *pipe, int start) start 71 sound/pci/mixart/mixart.c if(start) return 0; /* already started */ start 74 sound/pci/mixart/mixart.c if(!start) return 0; /* already stopped */ start 104 sound/pci/mixart/mixart.c if(start) start 121 sound/pci/mixart/mixart.c if(start) { start 381 sound/pci/mixart/mixart.c static int mixart_set_stream_state(struct mixart_stream *stream, int start) start 396 sound/pci/mixart/mixart.c request.message_id = start ? MSG_STREAM_START_INPUT_STAGE_PACKET : MSG_STREAM_STOP_INPUT_STAGE_PACKET; start 398 sound/pci/mixart/mixart.c request.message_id = start ? MSG_STREAM_START_OUTPUT_STAGE_PACKET : MSG_STREAM_STOP_OUTPUT_STAGE_PACKET; start 496 sound/pci/pcxhr/pcxhr.c int stream_mask, start; start 499 sound/pci/pcxhr/pcxhr.c start = 1; start 506 sound/pci/pcxhr/pcxhr.c start = 0; start 519 sound/pci/pcxhr/pcxhr.c pcxhr_init_rmh(&rmh, start ? CMD_START_STREAM : CMD_STOP_STREAM); start 530 sound/pci/pcxhr/pcxhr.c start ? PCXHR_STREAM_STATUS_STARTED : PCXHR_STREAM_STATUS_STOPPED; start 880 sound/pci/pcxhr/pcxhr.c static int pcxhr_hardware_timer(struct pcxhr_mgr *mgr, int start) start 886 sound/pci/pcxhr/pcxhr.c if (start) { start 894 sound/pci/pcxhr/pcxhr_core.c int capture_mask, int start) start 910 sound/pci/pcxhr/pcxhr_core.c start ? 
"START" : "STOP", audio_mask, state); start 911 sound/pci/pcxhr/pcxhr_core.c if (start) { start 937 sound/pci/pcxhr/pcxhr_core.c if ((state & audio_mask) == (start ? audio_mask : 0)) start 945 sound/pci/pcxhr/pcxhr_core.c if (!start) { start 138 sound/pci/pcxhr/pcxhr_core.h int pcxhr_set_pipe_state(struct pcxhr_mgr *mgr, int playback_mask, int capture_mask, int start); start 191 sound/pci/ymfpci/ymfpci.h __le32 start; start 208 sound/pci/ymfpci/ymfpci.h __le32 start; /* 32-bit offset */ start 215 sound/pci/ymfpci/ymfpci.h __le32 start; /* 32-bit offset */ start 301 sound/pci/ymfpci/ymfpci_main.c pos = le32_to_cpu(voice->bank[chip->active_bank].start); start 353 sound/pci/ymfpci/ymfpci_main.c pos = le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; start 739 sound/pci/ymfpci/ymfpci_main.c bank->start = 0; start 754 sound/pci/ymfpci/ymfpci_main.c return le32_to_cpu(voice->bank[chip->active_bank].start); start 765 sound/pci/ymfpci/ymfpci_main.c return le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; start 1931 sound/pci/ymfpci/ymfpci_main.c .start = snd_ymfpci_timer_start, start 223 sound/pcmcia/pdaudiocf/pdaudiocf.c if (snd_pdacf_assign_resources(pdacf, link->resource[0]->start, start 234 sound/pcmcia/vx/vxpocket.c if (snd_vxpocket_assign_resources(chip, link->resource[0]->start, start 866 sound/ppc/pmac.c release_mem_region(chip->rsrc[i].start, start 1211 sound/ppc/pmac.c if (request_mem_region(chip->rsrc[i].start, start 1222 sound/ppc/pmac.c ctrl_addr = chip->rsrc[0].start; start 1223 sound/ppc/pmac.c txdma_addr = chip->rsrc[1].start; start 1236 sound/ppc/pmac.c if (request_mem_region(chip->rsrc[i].start, start 1247 sound/ppc/pmac.c ctrl_addr = chip->rsrc[0].start; start 1248 sound/ppc/pmac.c txdma_addr = chip->rsrc[1].start; start 1249 sound/ppc/pmac.c rxdma_addr = chip->rsrc[2].start; start 1307 sound/ppc/pmac.c ioremap(r.start, 0x40); start 55 sound/sh/aica.c .start = ARM_RESET_REGISTER, start 61 sound/sh/aica.c .start = SPU_MEMORY_BASE, start 111 sound/sh/sh_dac_audio.c chip->pdata->start(chip->pdata); start 230 sound/soc/adi/axi-i2s.c i2s->playback_dma_data.addr = res->start + AXI_I2S_REG_TX_FIFO; start 242 sound/soc/adi/axi-i2s.c i2s->capture_dma_data.addr = res->start + AXI_I2S_REG_RX_FIFO; start 214 sound/soc/adi/axi-spdif.c spdif->dma_data.addr = res->start + AXI_SPDIF_REG_TX_FIFO; start 1277 sound/soc/amd/acp-pcm-dma.c status = devm_request_irq(&pdev->dev, res->start, dma_irq_handler, start 641 sound/soc/amd/raven/acp3x-pcm-dma.c adata->acp3x_base = devm_ioremap(&pdev->dev, res->start, start 650 sound/soc/amd/raven/acp3x-pcm-dma.c adata->i2s_irq = res->start; start 80 sound/soc/amd/raven/pci-acp3x.c adata->res[0].start = addr; start 85 sound/soc/amd/raven/pci-acp3x.c adata->res[1].start = pci->irq; start 596 sound/soc/atmel/atmel-classd.c dd->phy_base = res->start; start 684 sound/soc/atmel/atmel-i2s.c dev->playback.addr = (dma_addr_t)mem->start + ATMEL_I2SC_THR; start 686 sound/soc/atmel/atmel-i2s.c dev->capture.addr = (dma_addr_t)mem->start + ATMEL_I2SC_RHR; start 646 sound/soc/atmel/atmel-pdmic.c dd->phy_base = res->start; start 952 sound/soc/atmel/mchp-i2s-mcc.c dev->playback.addr = (dma_addr_t)mem->start + MCHP_I2SMCC_THR; start 953 sound/soc/atmel/mchp-i2s-mcc.c dev->capture.addr = (dma_addr_t)mem->start + MCHP_I2SMCC_RHR; start 246 sound/soc/au1x/ac97c.c if (!devm_request_mem_region(&pdev->dev, iores->start, start 251 sound/soc/au1x/ac97c.c ctx->mmio = 
devm_ioremap_nocache(&pdev->dev, iores->start, start 259 sound/soc/au1x/ac97c.c ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; start 264 sound/soc/au1x/ac97c.c ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; start 28 sound/soc/au1x/dma.c u32 start; start 89 sound/soc/au1x/dma.c pointer->start = (u32)(dma_start + (i * period_bytes)); start 119 sound/soc/au1x/dma.c set_dma_addr0(stream->dma, stream->buffer->start); start 121 sound/soc/au1x/dma.c set_dma_addr1(stream->dma, stream->buffer->next->start); start 125 sound/soc/au1x/dma.c set_dma_addr1(stream->dma, stream->buffer->start); start 127 sound/soc/au1x/dma.c set_dma_addr0(stream->dma, stream->buffer->next->start); start 143 sound/soc/au1x/dma.c set_dma_addr0(stream->dma, stream->buffer->next->start); start 150 sound/soc/au1x/dma.c set_dma_addr1(stream->dma, stream->buffer->next->start); start 246 sound/soc/au1x/i2sc.c if (!devm_request_mem_region(&pdev->dev, iores->start, start 251 sound/soc/au1x/i2sc.c ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start, start 259 sound/soc/au1x/i2sc.c ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; start 264 sound/soc/au1x/i2sc.c ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; start 384 sound/soc/au1x/psc-ac97.c wd->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; start 389 sound/soc/au1x/psc-ac97.c wd->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; start 310 sound/soc/au1x/psc-i2s.c wd->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; start 315 sound/soc/au1x/psc-i2s.c wd->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; start 218 sound/soc/bcm/cygnus-pcm.c u32 start, start 232 sound/soc/bcm/cygnus-pcm.c initial_rd = start; start 236 sound/soc/bcm/cygnus-pcm.c initial_wr = start; start 240 sound/soc/bcm/cygnus-pcm.c end = start + bufsize - 1; start 249 sound/soc/bcm/cygnus-pcm.c writel(start, audio_io + p_rbuf->baseaddr); start 671 sound/soc/bcm/cygnus-pcm.c u32 start; start 687 sound/soc/bcm/cygnus-pcm.c start = runtime->dma_addr; start 691 sound/soc/bcm/cygnus-pcm.c ringbuf_set_initial(aio->cygaud->audio, p_rbuf, is_play, start, start 1374 sound/soc/codecs/88pm860x-codec.c pm860x->irq[i] = res->start + chip->irq_base; start 328 sound/soc/codecs/wm8958-dsp2.c static void wm8958_dsp_apply(struct snd_soc_component *component, int path, int start) start 362 sound/soc/codecs/wm8958-dsp2.c path, wm8994->dsp_active, start, pwr_reg, reg); start 364 sound/soc/codecs/wm8958-dsp2.c if (start && ena) { start 395 sound/soc/codecs/wm8958-dsp2.c if (!start && wm8994->dsp_active == path) { start 144 sound/soc/codecs/wm8994.c val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT start 148 sound/soc/codecs/wm8994.c rates[best].start, rates[best].rate, sysclk, start 559 sound/soc/dwc/dwc-i2s.c dev->play_dma_data.pd.addr = res->start + I2S_TXDMA; start 560 sound/soc/dwc/dwc-i2s.c dev->capture_dma_data.pd.addr = res->start + I2S_RXDMA; start 593 sound/soc/dwc/dwc-i2s.c dev->play_dma_data.dt.addr = res->start + I2S_TXDMA; start 603 sound/soc/dwc/dwc-i2s.c dev->capture_dma_data.dt.addr = res->start + I2S_RXDMA; start 878 sound/soc/fsl/fsl_asrc.c asrc_priv->paddr = res->start; start 920 sound/soc/fsl/fsl_dma.c dma->ssi_stx_phys = res.start + REG_SSI_STX0; start 921 sound/soc/fsl/fsl_dma.c dma->ssi_srx_phys = res.start + REG_SSI_SRX0; start 1001 sound/soc/fsl/fsl_esai.c esai_priv->dma_params_tx.addr = res->start + REG_ESAI_ETDR; start 1002 sound/soc/fsl/fsl_esai.c esai_priv->dma_params_rx.addr = res->start + REG_ESAI_ERDR; start 735 sound/soc/fsl/fsl_micfil.c 
micfil->dma_params_rx.addr = res->start + REG_MICFIL_DATACH0; start 1010 sound/soc/fsl/fsl_sai.c sai->dma_params_rx.addr = res->start + FSL_SAI_RDR0; start 1011 sound/soc/fsl/fsl_sai.c sai->dma_params_tx.addr = res->start + FSL_SAI_TDR0; start 1309 sound/soc/fsl/fsl_spdif.c spdif_priv->dma_params_tx.addr = res->start + REG_SPDIF_STL; start 1310 sound/soc/fsl/fsl_spdif.c spdif_priv->dma_params_rx.addr = res->start + REG_SPDIF_SRL; start 1493 sound/soc/fsl/fsl_ssi.c ssi->ssi_phys = res->start; start 61 sound/soc/fsl/fsl_utils.c (unsigned long long) res.start, dma_channel_np); start 558 sound/soc/fsl/imx-ssi.c ssi->dma_params_rx.addr = res->start + SSI_SRX0; start 559 sound/soc/fsl/imx-ssi.c ssi->dma_params_tx.addr = res->start + SSI_STX0; start 569 sound/soc/fsl/imx-ssi.c imx_pcm_dma_params_init_data(&ssi->filter_data_tx, res->start, start 575 sound/soc/fsl/imx-ssi.c imx_pcm_dma_params_init_data(&ssi->filter_data_rx, res->start, start 386 sound/soc/fsl/mpc5200_dma.c regs = ioremap(res.start, resource_size(&res)); start 419 sound/soc/fsl/mpc5200_dma.c fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32); start 431 sound/soc/fsl/mpc8610_hpcd.c guts_phys = res.start; start 440 sound/soc/fsl/p1022_ds.c guts_phys = res.start; start 389 sound/soc/fsl/p1022_rdk.c guts_phys = res.start; start 562 sound/soc/hisilicon/hi6210-i2s.c i2s->base_phys = (phys_addr_t)res->start; start 470 sound/soc/img/img-i2s-in.c i2s->dma_data.addr = res->start + IMG_I2S_IN_RX_FIFO; start 509 sound/soc/img/img-i2s-out.c i2s->dma_data.addr = res->start + IMG_I2S_OUT_TX_FIFO; start 265 sound/soc/img/img-parallel-out.c prl->dma_data.addr = res->start + IMG_PRL_OUT_TX_FIFO; start 776 sound/soc/img/img-spdif-in.c spdif->dma_data.addr = res->start + IMG_SPDIF_IN_RX_FIFO_OFFSET; start 384 sound/soc/img/img-spdif-out.c spdif->dma_data.addr = res->start + IMG_SPDIF_OUT_TX_FIFO; start 164 sound/soc/intel/atom/sst/sst.c .start = sst_start_mrfld, start 541 sound/soc/intel/atom/sst/sst.c ctx->ops->start(ctx); start 417 sound/soc/intel/atom/sst/sst.h int (*start)(struct intel_sst_drv *ctx); start 162 sound/soc/intel/atom/sst/sst_acpi.c dev_info(ctx->dev, "LPE base: %#x size:%#x", (unsigned int) rsrc->start, start 165 sound/soc/intel/atom/sst/sst_acpi.c ctx->iram_base = rsrc->start + ctx->pdata->res_info->iram_offset; start 175 sound/soc/intel/atom/sst/sst_acpi.c ctx->dram_base = rsrc->start + ctx->pdata->res_info->dram_offset; start 185 sound/soc/intel/atom/sst/sst_acpi.c ctx->shim_phy_add = rsrc->start + ctx->pdata->res_info->shim_offset; start 198 sound/soc/intel/atom/sst/sst_acpi.c ctx->mailbox_add = rsrc->start + ctx->pdata->res_info->mbox_offset; start 216 sound/soc/intel/atom/sst/sst_acpi.c ctx->ddr_base = rsrc->start; start 430 sound/soc/intel/atom/sst/sst_loader.c ret_val = sst_drv_ctx->ops->start(sst_drv_ctx); start 238 sound/soc/intel/baytrail/sst-baytrail-dsp.c u32 start; start 323 sound/soc/intel/baytrail/sst-baytrail-dsp.c offset = region[i].start; start 324 sound/soc/intel/baytrail/sst-baytrail-dsp.c size = (region[i].end - region[i].start) / region[i].blocks; start 120 sound/soc/intel/common/sst-acpi.c sst_pdata->lpe_base = mmio->start; start 129 sound/soc/intel/common/sst-acpi.c sst_pdata->pcicfg_base = mmio->start; start 138 sound/soc/intel/common/sst-acpi.c sst_pdata->fw_base = mmio->start; start 294 sound/soc/intel/common/sst-firmware.c mem.start = sst->addr.lpe_base + sst_pdata->dma_base; start 443 sound/soc/intel/haswell/sst-haswell-dsp.c u32 start; start 662 sound/soc/intel/haswell/sst-haswell-dsp.c 
offset = region[i].start; start 663 sound/soc/intel/haswell/sst-haswell-dsp.c size = (region[i].end - region[i].start) / region[i].blocks; start 103 sound/soc/intel/skylake/skl-messages.c static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag) start 116 sound/soc/intel/skylake/skl-messages.c snd_hdac_dsp_trigger(stream, start); start 430 sound/soc/intel/skylake/skl-pcm.c int start; start 444 sound/soc/intel/skylake/skl-pcm.c start = 1; start 450 sound/soc/intel/skylake/skl-pcm.c start = 0; start 459 sound/soc/intel/skylake/skl-pcm.c if (start) { start 1103 sound/soc/intel/skylake/skl-pcm.c bool start; start 1120 sound/soc/intel/skylake/skl-pcm.c start = true; start 1126 sound/soc/intel/skylake/skl-pcm.c start = false; start 1150 sound/soc/intel/skylake/skl-pcm.c if (start) start 1157 sound/soc/intel/skylake/skl-pcm.c snd_hdac_stream_sync(hstr, start, sbits); start 1163 sound/soc/intel/skylake/skl-pcm.c if (start) start 248 sound/soc/intel/skylake/skl-sst-cldma.c bool start = true; start 270 sound/soc/intel/skylake/skl-sst-cldma.c skl_cldma_fill_buffer(ctx, size, curr_pos, true, start); start 273 sound/soc/intel/skylake/skl-sst-cldma.c start = false; start 295 sound/soc/intel/skylake/skl-sst-cldma.c curr_pos, false, start); start 163 sound/soc/intel/skylake/skl-sst-dsp.h int (*trigger)(struct device *dev, bool start, int stream_tag); start 515 sound/soc/jz4740/jz4740-i2s.c i2s->phys_base = mem->start; start 89 sound/soc/pxa/mmp-pcm.c (chan->chan_id == dma_data->dma_res->start)) { start 1300 sound/soc/qcom/qdsp6/q6afe.c struct afe_port_cmd_device_start *start; start 1328 sound/soc/qcom/qdsp6/q6afe.c pkt_size = APR_HDR_SIZE + sizeof(*start); start 1334 sound/soc/qcom/qdsp6/q6afe.c start = p + APR_HDR_SIZE; start 1345 sound/soc/qcom/qdsp6/q6afe.c start->port_id = port_id; start 634 sound/soc/rockchip/rockchip_i2s.c i2s->playback_dma_data.addr = res->start + I2S_TXDR; start 638 sound/soc/rockchip/rockchip_i2s.c i2s->capture_dma_data.addr = res->start + I2S_RXDR; start 508 sound/soc/rockchip/rockchip_pdm.c pdm->capture_dma_data.addr = res->start + PDM_RXFIFO_DATA; start 348 sound/soc/rockchip/rockchip_spdif.c spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR; start 1450 sound/soc/samsung/i2s.c regs_base = res->start; start 41 sound/soc/samsung/idma.c dma_addr_t start; start 156 sound/soc/samsung/idma.c prtd->start = prtd->pos = runtime->dma_addr; start 177 sound/soc/samsung/idma.c prtd->pos = prtd->start; start 229 sound/soc/samsung/idma.c res = src - prtd->start; start 269 sound/soc/samsung/idma.c addr %= (u32)(prtd->end - prtd->start); start 537 sound/soc/samsung/pcm.c s3c_pcm_stereo_in[pdev->id].addr = mem_res->start + S3C_PCM_RXFIFO; start 538 sound/soc/samsung/pcm.c s3c_pcm_stereo_out[pdev->id].addr = mem_res->start + S3C_PCM_TXFIFO; start 167 sound/soc/samsung/s3c2412-i2s.c s3c2412_i2s_pcm_stereo_out.addr = res->start + S3C2412_IISTXD; start 169 sound/soc/samsung/s3c2412-i2s.c s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD; start 440 sound/soc/samsung/s3c24xx-i2s.c s3c24xx_i2s_pcm_stereo_out.addr = res->start + S3C2410_IISFIFO; start 441 sound/soc/samsung/s3c24xx-i2s.c s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO; start 404 sound/soc/samsung/spdif.c if (!request_mem_region(mem_res->start, start 411 sound/soc/samsung/spdif.c spdif->regs = ioremap(mem_res->start, 0x100); start 419 sound/soc/samsung/spdif.c spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF; start 447 sound/soc/samsung/spdif.c release_mem_region(mem_res->start, 
resource_size(mem_res)); start 465 sound/soc/samsung/spdif.c release_mem_region(mem_res->start, resource_size(mem_res)); start 1342 sound/soc/sh/fsi.c int start) start 1346 sound/soc/sh/fsi.c u32 enable = start ? DMA_ON : 0; start 1958 sound/soc/sh/fsi.c res->start, resource_size(res)); start 1971 sound/soc/sh/fsi.c fsi->phys = res->start; start 1984 sound/soc/sh/fsi.c fsi->phys = res->start + 0x40; start 121 sound/soc/sh/rcar/cmd.c .start = rsnd_cmd_start, start 735 sound/soc/sh/rcar/core.c ret = rsnd_dai_call(start, io, priv); start 295 sound/soc/sh/rcar/dma.c .start = rsnd_dmaen_start, start 483 sound/soc/sh/rcar/dma.c .start = rsnd_dmapp_start, start 187 sound/soc/sh/rcar/gen.c gen->res[reg_id] = res->start; start 302 sound/soc/sh/rcar/rsnd.h int (*start)(struct rsnd_mod *mod, start 597 sound/soc/sh/rcar/src.c .start = rsnd_src_start, start 931 sound/soc/sh/rcar/ssi.c .start = rsnd_ssi_start, start 1018 sound/soc/sh/rcar/ssi.c .start = rsnd_ssi_start, start 320 sound/soc/sh/rcar/ssiu.c .start = rsnd_ssiu_start_gen2, start 745 sound/soc/sh/siu_dai.c region = devm_request_mem_region(&pdev->dev, res->start, start 752 sound/soc/sh/siu_dai.c info->pram = devm_ioremap(&pdev->dev, res->start, PRAM_SIZE); start 755 sound/soc/sh/siu_dai.c info->xram = devm_ioremap(&pdev->dev, res->start + XRAM_OFFSET, start 759 sound/soc/sh/siu_dai.c info->yram = devm_ioremap(&pdev->dev, res->start + YRAM_OFFSET, start 763 sound/soc/sh/siu_dai.c info->reg = devm_ioremap(&pdev->dev, res->start + REG_OFFSET, start 34 sound/soc/sof/debug.c ktime_t start, end, test_end; start 51 sound/soc/sof/debug.c start = ktime_get(); start 60 sound/soc/sof/debug.c ipc_response_time = ktime_to_ns(ktime_sub(end, start)); start 266 sound/soc/sof/imx/imx8.c base = mmio->start; start 296 sound/soc/sof/imx/imx8.c sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start, start 297 sound/soc/sof/imx/imx8.c res.end - res.start + start 439 sound/soc/sof/intel/bdw.c base = mmio->start; start 466 sound/soc/sof/intel/bdw.c base = mmio->start; start 549 sound/soc/sof/intel/byt.c base = mmio->start; start 577 sound/soc/sof/intel/byt.c base = mmio->start; start 238 sound/soc/spear/spdif_in.c host->dma_params.addr = res_fifo->start; start 300 sound/soc/spear/spdif_out.c host->dma_params.addr = res->start + SPDIF_OUT_FIFO_DATA; start 925 sound/soc/sprd/sprd-mcdt.c chan->fifo_phys = res->start + MCDT_CH0_RXD + i * 4; start 929 sound/soc/sprd/sprd-mcdt.c chan->fifo_phys = res->start + MCDT_CH0_TXD + start 464 sound/soc/sprd/sprd-pcm-compress.c ret = stream->compr_ops->start(stream_id); start 45 sound/soc/sprd/sprd-pcm-dma.h int (*start)(int str_id); start 425 sound/soc/sti/sti_uniperif.c uni->fifo_phys_address = uni->mem_region->start + start 829 sound/soc/stm/stm32_i2s.c i2s->phys_addr = res->start; start 1368 sound/soc/stm/stm32_sai_sub.c sai->phys_addr = res->start; start 914 sound/soc/stm/stm32_spdifrx.c spdifrx->phys_addr = res->start; start 1786 sound/soc/sunxi/sun4i-codec.c scodec->playback_dma_data.addr = res->start + quirks->reg_dac_txdata; start 1791 sound/soc/sunxi/sun4i-codec.c scodec->capture_dma_data.addr = res->start + quirks->reg_adc_rxdata; start 1246 sound/soc/sunxi/sun4i-i2s.c i2s->playback_dma_data.addr = res->start + start 1250 sound/soc/sunxi/sun4i-i2s.c i2s->capture_dma_data.addr = res->start + SUN4I_I2S_FIFO_RX_REG; start 549 sound/soc/sunxi/sun4i-spdif.c host->dma_params_tx.addr = res->start + quirks->reg_dac_txdata; start 360 sound/soc/tegra/tegra20_ac97.c ac97->capture_dma_data.addr = mem->start + 
TEGRA20_AC97_FIFO_RX1; start 364 sound/soc/tegra/tegra20_ac97.c ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1; start 364 sound/soc/tegra/tegra20_i2s.c i2s->capture_dma_data.addr = mem->start + TEGRA20_I2S_FIFO2; start 368 sound/soc/tegra/tegra20_i2s.c i2s->playback_dma_data.addr = mem->start + TEGRA20_I2S_FIFO1; start 291 sound/soc/tegra/tegra20_spdif.c spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT; start 294 sound/soc/tegra/tegra20_spdif.c spdif->playback_dma_data.slave_id = dmareq->start; start 579 sound/soc/tegra/tegra30_ahub.c ahub->apbif_addr = res0->start; start 679 sound/soc/ti/davinci-i2s.c dma_data->addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG); start 684 sound/soc/ti/davinci-i2s.c *dma = res->start; start 694 sound/soc/ti/davinci-i2s.c dma_data->addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG); start 699 sound/soc/ti/davinci-i2s.c *dma = res->start; start 2244 sound/soc/ti/davinci-mcasp.c dma_data->addr = dat->start; start 2246 sound/soc/ti/davinci-mcasp.c dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata); start 2251 sound/soc/ti/davinci-mcasp.c *dma = res->start; start 2265 sound/soc/ti/davinci-mcasp.c dma_data->addr = dat->start; start 2268 sound/soc/ti/davinci-mcasp.c mem->start + davinci_mcasp_rxdma_offset(pdata); start 2273 sound/soc/ti/davinci-mcasp.c *dma = res->start; start 485 sound/soc/ti/omap-dmic.c dmic->dma_data.addr = res->start + OMAP_DMIC_DATA_REG; start 360 sound/soc/ti/omap-mcbsp-st.c st_data->io_base_st = devm_ioremap(mcbsp->dev, res->start, start 634 sound/soc/ti/omap-mcbsp.c mcbsp->phys_base = res->start; start 641 sound/soc/ti/omap-mcbsp.c mcbsp->phys_dma_base = res->start; start 669 sound/soc/ti/omap-mcbsp.c mcbsp->dma_req[0] = res->start; start 677 sound/soc/ti/omap-mcbsp.c mcbsp->dma_req[1] = res->start; start 559 sound/soc/ti/omap-mcpdm.c mcpdm->dma_data[0].addr = res->start + MCPDM_REG_DN_DATA; start 560 sound/soc/ti/omap-mcpdm.c mcpdm->dma_data[1].addr = res->start + MCPDM_REG_UP_DATA; start 193 sound/soc/txx9/txx9aclc-ac97.c drvdata->physbase = r->start; start 194 sound/soc/txx9/txx9aclc-ac97.c if (sizeof(drvdata->physbase) > sizeof(r->start) && start 195 sound/soc/txx9/txx9aclc-ac97.c r->start >= TXX9_DIRECTMAP_BASE && start 196 sound/soc/txx9/txx9aclc-ac97.c r->start < TXX9_DIRECTMAP_BASE + 0x400000) start 334 sound/soc/txx9/txx9aclc.c (int)dmadata->dma_res->start); start 1135 sound/soc/uniphier/aio-core.c int aiodma_rb_set_buffer(struct uniphier_aio_sub *sub, u64 start, u64 end, start 1139 sound/soc/uniphier/aio-core.c u64 size = end - start; start 1142 sound/soc/uniphier/aio-core.c if (end < start || period < 0) start 1147 sound/soc/uniphier/aio-core.c lower_32_bits(start)); start 1149 sound/soc/uniphier/aio-core.c upper_32_bits(start)); start 1162 sound/soc/uniphier/aio-core.c aiodma_rb_set_rp(sub, start); start 1170 sound/soc/uniphier/aio-core.c aiodma_rb_set_wp(sub, start); start 1188 sound/soc/uniphier/aio-core.c void aiodma_rb_sync(struct uniphier_aio_sub *sub, u64 start, u64 size, start 1192 sound/soc/uniphier/aio-core.c sub->rd_offs = aiodma_rb_get_rp(sub) - start; start 1202 sound/soc/uniphier/aio-core.c aiodma_rb_set_wp(sub, sub->wr_offs + start); start 1204 sound/soc/uniphier/aio-core.c sub->wr_offs = aiodma_rb_get_wp(sub) - start; start 1214 sound/soc/uniphier/aio-core.c aiodma_rb_set_rp(sub, sub->rd_offs + start); start 347 sound/soc/uniphier/aio.h int aiodma_rb_set_buffer(struct uniphier_aio_sub *sub, u64 start, u64 end, start 349 sound/soc/uniphier/aio.h void 
aiodma_rb_sync(struct uniphier_aio_sub *sub, u64 start, u64 size, start 710 sound/soc/ux500/ux500_msp_i2s.c msp->playback_dma_data.tx_rx_addr = res->start + MSP_DR; start 711 sound/soc/ux500/ux500_msp_i2s.c msp->capture_dma_data.tx_rx_addr = res->start + MSP_DR; start 713 sound/soc/ux500/ux500_msp_i2s.c msp->registers = devm_ioremap(&pdev->dev, res->start, start 282 sound/soc/xilinx/xlnx_spdif.c ret = devm_request_irq(dev, res->start, start 410 sound/soc/zte/zx-i2s.c zx_i2s->mapbase = res->start; start 322 sound/soc/zte/zx-spdif.c zx_spdif->mapbase = res->start; start 402 sound/soc/zte/zx-tdm.c zx_tdm->phy_addr = res->start; start 1034 sound/sparc/amd7930.c (unsigned long long)rp->start, start 879 sound/sparc/cs4231.c .start = snd_cs4231_timer_start, start 1869 sound/sparc/cs4231.c (unsigned long long)rp->start, start 2059 sound/sparc/cs4231.c op->resource[0].start, start 2629 sound/sparc/dbri.c rp->flags & 0xffL, (unsigned long long)rp->start, irq); start 249 sound/synth/emux/emux_effect.c vp->reg.start += effect_get_offset(chan, EMUX_FX_SAMPLE_START, start 84 sound/synth/emux/emux_proc.c vp->reg.start, vp->reg.end, vp->reg.loopstart, vp->reg.loopend); start 437 sound/synth/emux/soundfont.c zp->v.start == map.src_instr && start 464 sound/synth/emux/soundfont.c zp->v.start = map.src_instr; start 660 sound/synth/emux/soundfont.c avp->start += sample->v.start; start 841 sound/synth/emux/soundfont.c calc_gus_envelope_time(int rate, int start, int end) start 848 sound/synth/emux/soundfont.c t = end - start; start 965 sound/synth/emux/soundfont.c smp->v.start = 0; start 1289 sound/synth/emux/soundfont.c preset = zp->v.start; start 301 sound/usb/line6/pcm.c int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type, bool start) start 316 sound/usb/line6/pcm.c if (start) { start 192 sound/usb/line6/pcm.h bool start); start 156 sound/usb/mixer_scarlett.c int start; start 187 sound/usb/mixer_scarlett.c .start = 0, start 196 sound/usb/mixer_scarlett.c .start = 0, start 205 sound/usb/mixer_scarlett.c .start = 1, start 214 sound/usb/mixer_scarlett.c .start = 0, start 410 sound/usb/mixer_scarlett.c val = clamp(val - opt->start, 0, opt->len-1); start 429 sound/usb/mixer_scarlett.c val = val + opt->start; start 633 sound/usb/mixer_scarlett.c .start = -1, start 640 sound/usb/mixer_scarlett.c .start = -1, start 675 sound/usb/mixer_scarlett.c .start = -1, start 682 sound/usb/mixer_scarlett.c .start = -1, start 714 sound/usb/mixer_scarlett.c .start = -1, start 721 sound/usb/mixer_scarlett.c .start = -1, start 751 sound/usb/mixer_scarlett.c .start = -1, start 758 sound/usb/mixer_scarlett.c .start = -1, start 793 sound/usb/mixer_scarlett.c .start = -1, start 800 sound/usb/mixer_scarlett.c .start = -1, start 463 sound/usb/usx2y/usbusx2yaudio.c goto start; start 466 sound/usb/usx2y/usbusx2yaudio.c start: start 414 sound/usb/usx2y/usx2yhwdeppcm.c goto start; start 417 sound/usb/usx2y/usx2yhwdeppcm.c start: start 1757 sound/x86/intel_hdmi_audio.c __func__, (unsigned int)res_mmio->start, start 1760 sound/x86/intel_hdmi_audio.c card_ctx->mmio_start = ioremap_nocache(res_mmio->start, start 1298 tools/bpf/bpf_dbg.c static char **shell_completion(const char *buf, int start, int end) start 1302 tools/bpf/bpf_dbg.c if (start == 0) start 20 tools/bpf/bpftool/cfg.c struct bpf_insn *start; start 68 tools/bpf/bpftool/cfg.c if (func->start == insn) start 70 tools/bpf/bpftool/cfg.c else if (func->start > insn) start 80 tools/bpf/bpftool/cfg.c new_func->start = insn; start 154 tools/bpf/bpftool/cfg.c func->end = 
func_next(func)->start - 1; start 170 tools/bpf/bpftool/cfg.c cur = func->start; start 405 tools/bpf/bpftool/cfg.c start_idx = bb->head - func->start; start 7 tools/build/feature/test-dwarf_getlocations.c Dwarf_Addr base, start, end; start 12 tools/build/feature/test-dwarf_getlocations.c return (int)dwarf_getlocations(&attr, offset, &base, &start, &end, &op, &nops); start 8 tools/build/feature/test-libunwind-debug-frame-aarch64.c const char *obj_name, unw_word_t start, start 8 tools/build/feature/test-libunwind-debug-frame-arm.c const char *obj_name, unw_word_t start, start 8 tools/build/feature/test-libunwind-debug-frame.c const char *obj_name, unw_word_t start, start 140 tools/hv/hv_fcopy_daemon.c struct hv_start_fcopy start; start 215 tools/hv/hv_fcopy_daemon.c error = hv_start_fcopy(&buffer.start); start 1051 tools/hv/hv_kvp_daemon.c char *start; start 1058 tools/hv/hv_kvp_daemon.c start = in_buf + *offset; start 1060 tools/hv/hv_kvp_daemon.c x = strchr(start, ';'); start 1064 tools/hv/hv_kvp_daemon.c x = start + strlen(start); start 1066 tools/hv/hv_kvp_daemon.c if (strlen(start) != 0) { start 1071 tools/hv/hv_kvp_daemon.c while (start[i] == ' ') start 1074 tools/hv/hv_kvp_daemon.c if ((x - start) <= out_len) { start 1075 tools/hv/hv_kvp_daemon.c strcpy(out_buf, (start + i)); start 1076 tools/hv/hv_kvp_daemon.c *offset += (x - start) + 1; start 18 tools/include/linux/bitmap.h void bitmap_clear(unsigned long *map, unsigned int start, int len); start 20 tools/include/linux/bitmap.h #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) start 429 tools/include/uapi/drm/i915_drm.h int start; /* agp offset */ start 657 tools/include/uapi/drm/i915_drm.h int start; start 62 tools/include/uapi/linux/fs.h __u64 start; start 184 tools/lib/bpf/bpf_prog_linfo.c __u32 jited_rec_size, rec_size, nr_linfo, start, i; start 195 tools/lib/bpf/bpf_prog_linfo.c start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip; start 198 tools/lib/bpf/bpf_prog_linfo.c (start * jited_rec_size); start 205 tools/lib/bpf/bpf_prog_linfo.c raw_linfo = prog_linfo->raw_linfo + (start * rec_size); start 131 tools/lib/bpf/btf.c const char *start = btf->nohdr_data + hdr->str_off; start 132 tools/lib/bpf/btf.c const char *end = start + btf->hdr->str_len; start 135 tools/lib/bpf/btf.c start[0] || end[-1]) { start 140 tools/lib/bpf/btf.c btf->strings = start; start 1612 tools/lib/bpf/btf.c char *start = (char *)d->btf->nohdr_data + hdr->str_off; start 1613 tools/lib/bpf/btf.c char *end = start + d->btf->hdr->str_len; start 1614 tools/lib/bpf/btf.c char *p = start, *tmp_strs = NULL; start 1619 tools/lib/bpf/btf.c .data = start, start 1705 tools/lib/bpf/btf.c memmove(start, tmp_strs, d->btf->hdr->str_len); start 1706 tools/lib/bpf/btf.c end = start + d->btf->hdr->str_len; start 1716 tools/lib/bpf/btf.c d->btf->hdr->str_len = end - start; start 5910 tools/lib/bpf/libbpf.c int err = 0, n, len, start, end = -1; start 5922 tools/lib/bpf/libbpf.c n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); start 5928 tools/lib/bpf/libbpf.c end = start; start 5930 tools/lib/bpf/libbpf.c if (start < 0 || start > end) { start 5932 tools/lib/bpf/libbpf.c start, end, s); start 5942 tools/lib/bpf/libbpf.c memset(tmp + *mask_sz, 0, start - *mask_sz); start 5943 tools/lib/bpf/libbpf.c memset(tmp + start, 1, end - start + 1); start 33 tools/lib/find_bit.c unsigned long start, unsigned long invert) start 37 tools/lib/find_bit.c if (unlikely(start >= nbits)) start 40 tools/lib/find_bit.c tmp = addr1[start / BITS_PER_LONG]; 
start 42 tools/lib/find_bit.c tmp &= addr2[start / BITS_PER_LONG]; start 46 tools/lib/find_bit.c tmp &= BITMAP_FIRST_WORD_MASK(start); start 47 tools/lib/find_bit.c start = round_down(start, BITS_PER_LONG); start 50 tools/lib/find_bit.c start += BITS_PER_LONG; start 51 tools/lib/find_bit.c if (start >= nbits) start 54 tools/lib/find_bit.c tmp = addr1[start / BITS_PER_LONG]; start 56 tools/lib/find_bit.c tmp &= addr2[start / BITS_PER_LONG]; start 60 tools/lib/find_bit.c return min(start + __ffs(tmp), nbits); start 33 tools/lib/symbol/kallsyms.c char type, u64 start)) start 46 tools/lib/symbol/kallsyms.c u64 start; start 57 tools/lib/symbol/kallsyms.c len = hex2u64(line, &start); start 77 tools/lib/symbol/kallsyms.c err = process_symbol(arg, symbol_name, symbol_type, start); start 29 tools/lib/symbol/kallsyms.h char type, u64 start)); start 53 tools/lib/traceevent/kbuffer-parse.c unsigned int start; start 523 tools/lib/traceevent/kbuffer-parse.c kbuf->start = 16; start 525 tools/lib/traceevent/kbuffer-parse.c kbuf->start = 12; start 527 tools/lib/traceevent/kbuffer-parse.c kbuf->data = subbuffer + kbuf->start; start 604 tools/lib/traceevent/kbuffer-parse.c if (offset < kbuf->start) start 607 tools/lib/traceevent/kbuffer-parse.c offset -= kbuf->start; start 659 tools/lib/traceevent/kbuffer-parse.c return kbuf->curr + kbuf->start; start 728 tools/lib/traceevent/kbuffer-parse.c return kbuf->start; start 751 tools/lib/traceevent/kbuffer-parse.c int start; start 759 tools/lib/traceevent/kbuffer-parse.c start = 16; start 761 tools/lib/traceevent/kbuffer-parse.c start = 12; start 766 tools/lib/traceevent/kbuffer-parse.c if (ptr < subbuf || ptr >= subbuf + start + size) start 15 tools/perf/arch/arm/tests/vectors-page.c void *start, *end; start 17 tools/perf/arch/arm/tests/vectors-page.c if (find_map(&start, &end, VECTORS__MAP_NAME)) { start 159 tools/perf/arch/powerpc/util/skip-callchain-idx.c Dwarf_Addr start = pc; start 206 tools/perf/arch/powerpc/util/skip-callchain-idx.c ra_regno = dwarf_frame_info(frame, &start, &end, &signalp); start 265 tools/perf/arch/powerpc/util/skip-callchain-idx.c rc = check_return_addr(dso, al.map->start, ip); start 144 tools/perf/arch/powerpc/util/sym-handling.c if (map->unmap_ip(map, sym->start) == tev->point.address) { start 11 tools/perf/arch/s390/util/machine.c int arch__fix_module_text_start(u64 *start, u64 *size, const char *name) start 13 tools/perf/arch/s390/util/machine.c u64 m_start = *start; start 18 tools/perf/arch/s390/util/machine.c if (sysfs__read_ull(path, (unsigned long long *)start) < 0) { start 20 tools/perf/arch/s390/util/machine.c *start = m_start; start 32 tools/perf/arch/s390/util/machine.c *size -= (*start - m_start); start 50 tools/perf/arch/s390/util/machine.c p->end = c->start; start 60 tools/perf/arch/x86/util/event.c event->mmap.start = pos->start; start 61 tools/perf/arch/x86/util/event.c event->mmap.len = pos->end - pos->start; start 25 tools/perf/arch/x86/util/machine.c static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start, start 40 tools/perf/arch/x86/util/machine.c mi->maps[mi->cnt].start = start; start 51 tools/perf/arch/x86/util/machine.c u64 start) start 57 tools/perf/arch/x86/util/machine.c mi->entry_trampoline = start; start 62 tools/perf/arch/x86/util/machine.c u64 end = start + page_size; start 64 tools/perf/arch/x86/util/machine.c return add_extra_kernel_map(mi, start, end, 0, name); start 38 tools/perf/bench/epoll-ctl.c struct timeval start, end, runtime; start 98 tools/perf/bench/epoll-ctl.c 
timersub(&end, &start, &runtime); start 364 tools/perf/bench/epoll-ctl.c gettimeofday(&start, NULL); start 93 tools/perf/bench/epoll-wait.c struct timeval start, end, runtime; start 280 tools/perf/bench/epoll-wait.c timersub(&end, &start, &runtime); start 482 tools/perf/bench/epoll-wait.c gettimeofday(&start, NULL); start 40 tools/perf/bench/futex-hash.c struct timeval start, end, runtime; start 107 tools/perf/bench/futex-hash.c timersub(&end, &start, &runtime); start 164 tools/perf/bench/futex-hash.c gettimeofday(&start, NULL); start 40 tools/perf/bench/futex-lock-pi.c struct timeval start, end, runtime; start 77 tools/perf/bench/futex-lock-pi.c timersub(&end, &start, &runtime); start 188 tools/perf/bench/futex-lock-pi.c gettimeofday(&start, NULL); start 161 tools/perf/bench/futex-requeue.c struct timeval start, end, runtime; start 176 tools/perf/bench/futex-requeue.c gettimeofday(&start, NULL); start 187 tools/perf/bench/futex-requeue.c timersub(&end, &start, &runtime); start 76 tools/perf/bench/futex-wake-parallel.c struct timeval start, end; start 80 tools/perf/bench/futex-wake-parallel.c gettimeofday(&start, NULL); start 88 tools/perf/bench/futex-wake-parallel.c timersub(&end, &start, &waker->runtime); start 166 tools/perf/bench/futex-wake.c struct timeval start, end, runtime; start 181 tools/perf/bench/futex-wake.c gettimeofday(&start, NULL); start 185 tools/perf/bench/futex-wake.c timersub(&end, &start, &runtime); start 801 tools/perf/bench/numa.c long start, end; start 805 tools/perf/bench/numa.c start = lfsr % words; start 806 tools/perf/bench/numa.c end = min(start + 1024, words-1); start 809 tools/perf/bench/numa.c bzero(data + start, (end-start) * sizeof(u64)); start 811 tools/perf/bench/numa.c for (j = start; j < end; j++) start 1093 tools/perf/bench/numa.c struct timeval start0, start, stop, diff; start 1153 tools/perf/bench/numa.c start = stop = start0; start 1154 tools/perf/bench/numa.c last_perturbance = start.tv_sec; start 1157 tools/perf/bench/numa.c start = stop; start 1203 tools/perf/bench/numa.c if (start.tv_sec == stop.tv_sec) start 1238 tools/perf/bench/numa.c timersub(&stop, &start, &diff); start 1487 tools/perf/bench/numa.c struct timeval start, stop, diff; start 1512 tools/perf/bench/numa.c gettimeofday(&start, NULL); start 1547 tools/perf/bench/numa.c timersub(&stop, &start, &diff); start 1556 tools/perf/bench/numa.c start = stop; start 1559 tools/perf/bench/numa.c gettimeofday(&start, NULL); start 1583 tools/perf/bench/numa.c timersub(&stop, &start, &diff); start 264 tools/perf/bench/sched-messaging.c struct timeval start, stop, diff; start 290 tools/perf/bench/sched-messaging.c gettimeofday(&start, NULL); start 302 tools/perf/bench/sched-messaging.c timersub(&stop, &start, &diff); start 81 tools/perf/bench/sched-pipe.c struct timeval start, stop, diff; start 99 tools/perf/bench/sched-pipe.c gettimeofday(&start, NULL); start 148 tools/perf/bench/sched-pipe.c timersub(&stop, &start, &diff); start 82 tools/perf/builtin-annotate.c static void process_basic_block(struct addr_map_symbol *start, start 86 tools/perf/builtin-annotate.c struct symbol *sym = start->sym; start 94 tools/perf/builtin-annotate.c if (!start->addr || start->addr > end->addr) start 97 tools/perf/builtin-annotate.c iter = block_range__create(start->addr, end->addr); start 550 tools/perf/builtin-diff.c if (bi_l->start == bi_r->start) { start 556 tools/perf/builtin-diff.c return (int64_t)(bi_r->start - bi_l->start); start 562 tools/perf/builtin-diff.c if (bi_l->sym->start != bi_r->sym->start) 
start 563 tools/perf/builtin-diff.c return (int64_t)(bi_r->sym->start - bi_l->sym->start); start 607 tools/perf/builtin-diff.c bi->start = ch->start; start 665 tools/perf/builtin-diff.c if ((!cmp) && (bi_a->start == bi_b->start) && (bi_a->end == bi_b->end)) start 1352 tools/perf/builtin-diff.c start_line = map__srcline(he->ms.map, bi->sym->start + bi->start, start 1355 tools/perf/builtin-diff.c end_line = map__srcline(he->ms.map, bi->sym->start + bi->end, start 1363 tools/perf/builtin-diff.c bi->start, bi->end, block_he->diff.cycles); start 40 tools/perf/builtin-kallsyms.c map->unmap_ip(map, symbol->start), map->unmap_ip(map, symbol->end), start 41 tools/perf/builtin-kallsyms.c symbol->start, symbol->end); start 300 tools/perf/builtin-kmem.c u64 start; start 313 tools/perf/builtin-kmem.c if (fa->start > fb->start) start 324 tools/perf/builtin-kmem.c if (fb->start <= fa->start && fa->end < fb->end) start 327 tools/perf/builtin-kmem.c if (fa->start > fb->start) start 369 tools/perf/builtin-kmem.c func[nr_alloc_funcs].start = sym->start; start 410 tools/perf/builtin-kmem.c key.start = key.end = node->ip; start 1020 tools/perf/builtin-kmem.c addr - map->unmap_ip(map, sym->start)); start 735 tools/perf/builtin-report.c indent, "", map->start, map->end, start 2553 tools/perf/builtin-sched.c if (ptime->start && ptime->start > t) start 2556 tools/perf/builtin-sched.c if (tprev && ptime->start > tprev) start 2557 tools/perf/builtin-sched.c tprev = ptime->start; start 2613 tools/perf/builtin-sched.c if (sched->hist_time.start == 0 && t >= ptime->start) start 2614 tools/perf/builtin-sched.c sched->hist_time.start = t; start 2808 tools/perf/builtin-sched.c u64 hist_time = sched->hist_time.end - sched->hist_time.start; start 875 tools/perf/builtin-script.c static int grab_bb(u8 *buffer, u64 start, u64 end, start 883 tools/perf/builtin-script.c if (!start || !end) start 886 tools/perf/builtin-script.c kernel = machine__kernel_ip(machine, start); start 899 tools/perf/builtin-script.c pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end); start 904 tools/perf/builtin-script.c if (end - start > MAXBB - MAXINSN) { start 906 tools/perf/builtin-script.c pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end); start 908 tools/perf/builtin-script.c pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start); start 912 tools/perf/builtin-script.c if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) { start 913 tools/perf/builtin-script.c pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end); start 917 tools/perf/builtin-script.c pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end); start 924 tools/perf/builtin-script.c offset = al.map->map_ip(al.map, start); start 926 tools/perf/builtin-script.c end - start + MAXINSN); start 931 tools/perf/builtin-script.c start, end); start 981 tools/perf/builtin-script.c if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end) start 993 tools/perf/builtin-script.c off = al.addr - al.sym->start; start 995 tools/perf/builtin-script.c off = al.addr - al.map->start - al.sym->start; start 1014 tools/perf/builtin-script.c u64 start, end; start 1052 tools/perf/builtin-script.c start = br->entries[i + 1].to; start 1055 tools/perf/builtin-script.c len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false); start 1059 tools/perf/builtin-script.c pr_debug("\tpatching up to %" PRIx64 "-%" 
PRIx64 "\n", start, end); start 1060 tools/perf/builtin-script.c len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false); start 1067 tools/perf/builtin-script.c uint64_t ip = start + off; start 1087 tools/perf/builtin-script.c if (off != end - start) start 1108 tools/perf/builtin-script.c start = br->entries[0].to; start 1110 tools/perf/builtin-script.c if (end < start) { start 1112 tools/perf/builtin-script.c end = start + 128; start 1114 tools/perf/builtin-script.c len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true); start 1115 tools/perf/builtin-script.c printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp); start 1128 tools/perf/builtin-script.c for (off = 0; off <= end - start; off += ilen) { start 1130 tools/perf/builtin-script.c printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off, start 1131 tools/perf/builtin-script.c dump_insn(&x, start + off, buffer + off, len - off, &ilen)); start 1134 tools/perf/builtin-script.c if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) { start 1142 tools/perf/builtin-script.c print_srccode(thread, x.cpumode, start + off); start 268 tools/perf/builtin-timechart.c unsigned int cpu, u64 start, u64 end, start 287 tools/perf/builtin-timechart.c sample->start_time = start; start 295 tools/perf/builtin-timechart.c if (sample->type == TYPE_RUNNING && end > start && start > 0) { start 296 tools/perf/builtin-timechart.c c->total_time += (end-start); start 297 tools/perf/builtin-timechart.c p->total_time += (end-start); start 300 tools/perf/builtin-timechart.c if (c->start_time == 0 || c->start_time > start) start 301 tools/perf/builtin-timechart.c c->start_time = start; start 302 tools/perf/builtin-timechart.c if (p->start_time == 0 || p->start_time > start) start 303 tools/perf/builtin-timechart.c p->start_time = start; start 719 tools/perf/builtin-timechart.c u64 start, int fd) start 751 tools/perf/builtin-timechart.c sample->start_time = start; start 757 tools/perf/builtin-timechart.c if (c->start_time == 0 || c->start_time > start) start 758 tools/perf/builtin-timechart.c c->start_time = start; start 180 tools/perf/builtin-top.c map->start, map->end, sym->start, sym->end, start 2443 tools/perf/builtin-trace.c al->addr - al->sym->start); start 25 tools/perf/lib/include/internal/mmap.h u64 start; start 14 tools/perf/lib/include/perf/event.h __u64 start; start 23 tools/perf/lib/include/perf/event.h __u64 start; start 10 tools/perf/lib/threadmap.c static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr) start 12 tools/perf/lib/threadmap.c size_t size = (nr - start) * sizeof(map->map[0]); start 14 tools/perf/lib/threadmap.c memset(&map->map[start], 0, size); start 21 tools/perf/lib/threadmap.c int start = map ? 
map->nr : 0; start 28 tools/perf/lib/threadmap.c perf_thread_map__reset(map, start, nr); start 15 tools/perf/perf-read-vdso.c void *start, *end; start 18 tools/perf/perf-read-vdso.c if (find_map(&start, &end, VDSO__MAP_NAME)) start 21 tools/perf/perf-read-vdso.c size = end - start; start 24 tools/perf/perf-read-vdso.c written = fwrite(start, 1, size, stdout); start 27 tools/perf/perf-read-vdso.c start += written; start 100 tools/perf/pmu-events/jevents.c strncat(*dst, map + bt->start, blen); start 189 tools/perf/pmu-events/jevents.c for (i = newval->start; i < newval->end; i++) { start 223 tools/perf/pmu-events/jevents.c json_len(val), map + val->start); start 259 tools/perf/pmu-events/jevents.c if (!(t)->start && (t) > tokens) \ start 39 tools/perf/pmu-events/jsmn.c tok->start = tok->end = -1; start 48 tools/perf/pmu-events/jsmn.c int start, int end) start 51 tools/perf/pmu-events/jsmn.c token->start = start; start 64 tools/perf/pmu-events/jsmn.c int start; start 66 tools/perf/pmu-events/jsmn.c start = parser->pos; start 89 tools/perf/pmu-events/jsmn.c parser->pos = start; start 98 tools/perf/pmu-events/jsmn.c parser->pos = start; start 105 tools/perf/pmu-events/jsmn.c parser->pos = start; start 108 tools/perf/pmu-events/jsmn.c jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos); start 121 tools/perf/pmu-events/jsmn.c int start = parser->pos; start 133 tools/perf/pmu-events/jsmn.c parser->pos = start; start 136 tools/perf/pmu-events/jsmn.c jsmn_fill_token(token, JSMN_STRING, start+1, start 161 tools/perf/pmu-events/jsmn.c parser->pos = start; start 166 tools/perf/pmu-events/jsmn.c parser->pos = start; start 194 tools/perf/pmu-events/jsmn.c token->start = parser->pos; start 202 tools/perf/pmu-events/jsmn.c if (token->start != -1 && token->end == -1) { start 215 tools/perf/pmu-events/jsmn.c if (token->start != -1 && token->end == -1) { start 281 tools/perf/pmu-events/jsmn.c if (tokens[i].start != -1 && tokens[i].end == -1) start 38 tools/perf/pmu-events/jsmn.h int start; start 134 tools/perf/pmu-events/json.c return countchar(map, '\n', t->start) + 1; start 154 tools/perf/pmu-events/json.c return t->end - t->start; start 161 tools/perf/pmu-events/json.c return len == strlen(s) && !strncasecmp(map + t->start, s, len); start 297 tools/perf/tests/code-reading.c if (state->done[d] == al.map->start) { start 307 tools/perf/tests/code-reading.c state->done[state->done_cnt++] = al.map->start; start 27 tools/perf/tests/hists_common.c u64 start; start 42 tools/perf/tests/hists_common.c u64 start; start 115 tools/perf/tests/hists_common.c .start = fake_mmap_info[i].start, start 142 tools/perf/tests/hists_common.c sym = symbol__new(fsym->start, fsym->length, start 12 tools/perf/tests/map_groups.c u64 start; start 23 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start); start 76 tools/perf/tests/map_groups.c map->start = bpf_progs[i].start; start 92 tools/perf/tests/map_groups.c map_kcore1->start = 100; start 96 tools/perf/tests/map_groups.c map_kcore2->start = 550; start 100 tools/perf/tests/map_groups.c map_kcore3->start = 880; start 205 tools/perf/tests/mmap-thread-lookup.c pr_debug("map %p, addr %" PRIx64 "\n", al.map, al.map->start); start 23 tools/perf/tests/switch-tracking.c struct timeval start, now, diff, maxtime; start 30 tools/perf/tests/switch-tracking.c err = gettimeofday(&start, NULL); start 43 tools/perf/tests/switch-tracking.c timersub(&now, &start, &diff); start 36 tools/perf/tests/time-utils-test.c static bool 
test__perf_time__parse_str(const char *ostr, u64 start, u64 end) start 49 tools/perf/tests/time-utils-test.c if (ptime.start != start || ptime.end != end) { start 51 tools/perf/tests/time-utils-test.c start, end); start 102 tools/perf/tests/time-utils-test.c if (ptime[i].start != d->ptime[i].start || start 105 tools/perf/tests/time-utils-test.c i, d->ptime[i].start, d->ptime[i].end); start 117 tools/perf/tests/vmlinux-kallsyms.c if (sym->start == sym->end) start 120 tools/perf/tests/vmlinux-kallsyms.c mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start); start 126 tools/perf/tests/vmlinux-kallsyms.c if (pair && UM(pair->start) == mem_start) { start 154 tools/perf/tests/vmlinux-kallsyms.c if (UM(pair->start) == mem_start) start 213 tools/perf/tests/vmlinux-kallsyms.c mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start); start 220 tools/perf/tests/vmlinux-kallsyms.c if (pair->start == mem_start) { start 227 tools/perf/tests/vmlinux-kallsyms.c map->start, map->end, map->pgoff, map->dso->name); start 230 tools/perf/tests/vmlinux-kallsyms.c pair->start, pair->end, pair->pgoff); start 654 tools/perf/ui/browser.c u16 start, u16 end) start 657 tools/perf/ui/browser.c ui_browser__gotorc(browser, start, column); start 658 tools/perf/ui/browser.c SLsmg_draw_vline(end - start + 1); start 672 tools/perf/ui/browser.c u64 start, u64 end) start 678 tools/perf/ui/browser.c if (start < browser->top_idx + browser->rows) { start 679 tools/perf/ui/browser.c row = start - browser->top_idx; start 712 tools/perf/ui/browser.c u64 start, u64 end) start 718 tools/perf/ui/browser.c if (start >= browser->top_idx) { start 719 tools/perf/ui/browser.c row = start - browser->top_idx; start 751 tools/perf/ui/browser.c u64 start, u64 end) start 753 tools/perf/ui/browser.c if (start > end) start 754 tools/perf/ui/browser.c __ui_browser__line_arrow_up(browser, column, start, end); start 756 tools/perf/ui/browser.c __ui_browser__line_arrow_down(browser, column, start, end); start 52 tools/perf/ui/browser.h u64 start, u64 end); start 65 tools/perf/ui/browser.h u16 start, u16 end); start 1685 tools/perf/ui/browsers/hists.c char *start; start 1700 tools/perf/ui/browsers/hists.c start = strim(dummy_hpp.buf); start 1701 tools/perf/ui/browsers/hists.c ret = strlen(start); start 1703 tools/perf/ui/browsers/hists.c if (start != dummy_hpp.buf) start 1704 tools/perf/ui/browsers/hists.c memmove(dummy_hpp.buf, start, ret + 1); start 2572 tools/perf/ui/browsers/hists.c char start[32], end[32]; start 2580 tools/perf/ui/browsers/hists.c timestamp__scnprintf_usec(starttime, start, sizeof start); start 2582 tools/perf/ui/browsers/hists.c n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end); start 34 tools/perf/ui/browsers/map.c mb->addrlen, sym->start, mb->addrlen, sym->end, start 60 tools/perf/ui/gtk/annotate.c u64 start = map__rip_2objdump(map, sym->start); start 67 tools/perf/ui/gtk/annotate.c return scnprintf(buf, size, "%"PRIx64, start + dl->al.offset); start 418 tools/perf/ui/stdio/hist.c char *start = hpp->buf; start 448 tools/perf/ui/stdio/hist.c return hpp->buf - start; start 337 tools/perf/util/annotate.c u64 start, end; start 370 tools/perf/util/annotate.c start = map->unmap_ip(map, sym->start), start 373 tools/perf/util/annotate.c ops->target.outside = target.addr < start || target.addr > end; start 398 tools/perf/util/annotate.c ops->target.offset = target.addr - start; start 818 tools/perf/util/annotate.c u64 start, start 846 tools/perf/util/annotate.c ch[offset].start > start)) { start 853 
tools/perf/util/annotate.c ch[offset].start < start) start 857 tools/perf/util/annotate.c ch[offset].start = start; start 872 tools/perf/util/annotate.c if ((addr < sym->start || addr >= sym->end) && start 873 tools/perf/util/annotate.c (addr != sym->end || sym->start != sym->end)) { start 875 tools/perf/util/annotate.c __func__, __LINE__, sym->name, sym->start, addr, sym->end); start 879 tools/perf/util/annotate.c offset = addr - sym->start; start 883 tools/perf/util/annotate.c __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC); start 893 tools/perf/util/annotate.c sym->start, sym->name, addr, addr - sym->start, evidx, start 950 tools/perf/util/annotate.c static int symbol__account_cycles(u64 addr, u64 start, start 961 tools/perf/util/annotate.c if (addr < sym->start || addr >= sym->end) start 964 tools/perf/util/annotate.c if (start) { start 965 tools/perf/util/annotate.c if (start < sym->start || start >= sym->end) start 967 tools/perf/util/annotate.c if (start >= addr) start 968 tools/perf/util/annotate.c start = 0; start 970 tools/perf/util/annotate.c offset = addr - sym->start; start 972 tools/perf/util/annotate.c start ? start - sym->start : 0, start 974 tools/perf/util/annotate.c !!start); start 978 tools/perf/util/annotate.c struct addr_map_symbol *start, start 994 tools/perf/util/annotate.c if (start && start 995 tools/perf/util/annotate.c (start->sym == ams->sym || start 997 tools/perf/util/annotate.c start->addr == ams->sym->start + ams->map->start))) start 998 tools/perf/util/annotate.c saddr = start->al_addr; start 1002 tools/perf/util/annotate.c start ? start->addr : 0, start 1003 tools/perf/util/annotate.c ams->sym ? ams->sym->start + ams->map->start : 0, start 1011 tools/perf/util/annotate.c static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end) start 1016 tools/perf/util/annotate.c for (offset = start; offset <= end; offset++) { start 1023 tools/perf/util/annotate.c static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) start 1029 tools/perf/util/annotate.c n_insn = annotation__count_insn(notes, start, end); start 1037 tools/perf/util/annotate.c for (offset = start; offset <= end; offset++) { start 1075 tools/perf/util/annotate.c annotation__count_and_fill(notes, ch->start, offset, ch); start 1309 tools/perf/util/annotate.c if (br->is_target && br->start == addr) { start 1353 tools/perf/util/annotate.c static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width) start 1356 tools/perf/util/annotate.c const u64 addr = start + offset; start 1367 tools/perf/util/annotate.c annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start, start 1405 tools/perf/util/annotate.c annotation_line__print(queue, sym, start, evsel, len, start 1446 tools/perf/util/annotate.c disasm_line__print(dl, start, addr_fmt_width); start 1526 tools/perf/util/annotate.c u64 start = map__rip_2objdump(map, sym->start), start 1529 tools/perf/util/annotate.c offset = line_ip - start; start 1530 tools/perf/util/annotate.c if ((u64)line_ip < start || (u64)line_ip >= end) start 1550 tools/perf/util/annotate.c map__rip_2objdump(map, sym->start); start 1562 tools/perf/util/annotate.c target.sym->start == target.al_addr) start 1719 tools/perf/util/annotate.c int len = sym->end - sym->start; start 1738 tools/perf/util/annotate.c sym->name, sym->start, sym->end - sym->start); start 1881 tools/perf/util/annotate.c symfs_filename, sym->name, map->unmap_ip(map, 
sym->start), start 1891 tools/perf/util/annotate.c kce.addr = map__rip_2objdump(map, sym->start); start 1892 tools/perf/util/annotate.c kce.offs = sym->start; start 1893 tools/perf/util/annotate.c kce.len = sym->end - sym->start; start 1917 tools/perf/util/annotate.c map__rip_2objdump(map, sym->start), start 2110 tools/perf/util/annotate.c notes->start = map__rip_2objdump(map, sym->start); start 2249 tools/perf/util/annotate.c sym->start + offset, h->addr[offset].nr_samples); start 2253 tools/perf/util/annotate.c static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start) start 2260 tools/perf/util/annotate.c return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset); start 2277 tools/perf/util/annotate.c u64 start = map__rip_2objdump(map, sym->start); start 2316 tools/perf/util/annotate.c addr_fmt_width = annotated_source__addr_fmt_width(¬es->src->source, start); start 2326 tools/perf/util/annotate.c err = annotation_line__print(pos, sym, start, evsel, len, start 2665 tools/perf/util/annotate.c al->path = get_srcline(map->dso, notes->start + al->offset, NULL, start 2666 tools/perf/util/annotate.c false, true, notes->start + al->offset); start 2926 tools/perf/util/annotate.c addr += notes->start; start 233 tools/perf/util/annotate.h u64 start; start 273 tools/perf/util/annotate.h u64 start; start 341 tools/perf/util/annotate.h struct addr_map_symbol *start, start 1572 tools/perf/util/auxtrace.c filt->start = true; start 1575 tools/perf/util/auxtrace.c filt->start = true; start 1577 tools/perf/util/auxtrace.c filt->start = false; start 1579 tools/perf/util/auxtrace.c filt->start = false; start 1706 tools/perf/util/auxtrace.c u64 start; start 1727 tools/perf/util/auxtrace.c static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start) start 1733 tools/perf/util/auxtrace.c args->size = start - args->start; start 1743 tools/perf/util/auxtrace.c args->start = start; start 1749 tools/perf/util/auxtrace.c static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start) start 1755 tools/perf/util/auxtrace.c ++args->cnt, start, type, name); start 1780 tools/perf/util/auxtrace.c static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx) start 1790 tools/perf/util/auxtrace.c *start = 0; start 1814 tools/perf/util/auxtrace.c *start = args.start; start 1821 tools/perf/util/auxtrace.c char type, u64 start) start 1830 tools/perf/util/auxtrace.c args->start = start; start 1833 tools/perf/util/auxtrace.c args->size = round_up(start, page_size) + page_size - args->start; start 1849 tools/perf/util/auxtrace.c filt->addr = args.start; start 1855 tools/perf/util/auxtrace.c static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size) start 1857 tools/perf/util/auxtrace.c if (start + size >= filt->addr) start 1862 tools/perf/util/auxtrace.c filt->sym_to, start, filt->sym_from, filt->addr); start 1865 tools/perf/util/auxtrace.c filt->sym_to, start, filt->addr); start 1874 tools/perf/util/auxtrace.c u64 start, size; start 1886 tools/perf/util/auxtrace.c err = find_kern_sym(filt->sym_from, &start, &size, start 1890 tools/perf/util/auxtrace.c filt->addr = start; start 1898 tools/perf/util/auxtrace.c err = find_kern_sym(filt->sym_to, &start, &size, start 1903 tools/perf/util/auxtrace.c err = check_end_after_start(filt, start, size); start 1906 tools/perf/util/auxtrace.c filt->size = start + size - filt->addr; start 1961 tools/perf/util/auxtrace.c ++cnt, sym->start, start 1978 tools/perf/util/auxtrace.c static int 
find_dso_sym(struct dso *dso, const char *sym_name, u64 *start, start 1984 tools/perf/util/auxtrace.c *start = 0; start 1989 tools/perf/util/auxtrace.c if (*start) { start 1991 tools/perf/util/auxtrace.c *size = sym->start - *start; start 2000 tools/perf/util/auxtrace.c *start = sym->start; start 2001 tools/perf/util/auxtrace.c *size = sym->end - sym->start; start 2006 tools/perf/util/auxtrace.c if (!*start) start 2028 tools/perf/util/auxtrace.c u64 start, size; start 2050 tools/perf/util/auxtrace.c err = find_dso_sym(dso, filt->sym_from, &start, &size, start 2054 tools/perf/util/auxtrace.c filt->addr = start; start 2060 tools/perf/util/auxtrace.c err = find_dso_sym(dso, filt->sym_to, &start, &size, start 2065 tools/perf/util/auxtrace.c err = check_end_after_start(filt, start, size); start 2069 tools/perf/util/auxtrace.c filt->size = start + size - filt->addr; start 361 tools/perf/util/auxtrace.h bool start; start 25 tools/perf/util/block-range.c assert(old < entry->start); start 26 tools/perf/util/block-range.c assert(entry->start <= entry->end); /* single instruction block; jump to a jump */ start 43 tools/perf/util/block-range.c if (addr < entry->start) start 81 tools/perf/util/block-range.c struct block_range_iter block_range__create(u64 start, u64 end) start 92 tools/perf/util/block-range.c if (start < entry->start) start 94 tools/perf/util/block-range.c else if (start > entry->end) start 112 tools/perf/util/block-range.c if (entry->end < start) { start 119 tools/perf/util/block-range.c if (next->start <= end) { /* add head: [start...][n->start...] */ start 125 tools/perf/util/block-range.c .start = start, start 126 tools/perf/util/block-range.c .end = next->start - 1, start 135 tools/perf/util/block-range.c iter.start = head; start 148 tools/perf/util/block-range.c .start = start, start 158 tools/perf/util/block-range.c iter.start = entry; start 166 tools/perf/util/block-range.c if (entry->start < start) { /* split: [e->start...][start...] 
*/ start 172 tools/perf/util/block-range.c .start = entry->start, start 173 tools/perf/util/block-range.c .end = start - 1, start 181 tools/perf/util/block-range.c entry->start = start; start 189 tools/perf/util/block-range.c } else if (entry->start == start) start 192 tools/perf/util/block-range.c iter.start = entry; start 199 tools/perf/util/block-range.c entry = iter.start; start 210 tools/perf/util/block-range.c .start = end + 1, start 249 tools/perf/util/block-range.c if (end < next->start) { /* add tail: [...e->end][...end] */ start 257 tools/perf/util/block-range.c .start = entry->end + 1, start 274 tools/perf/util/block-range.c if (entry->end + 1 != next->start) { start 280 tools/perf/util/block-range.c .start = entry->end + 1, start 281 tools/perf/util/block-range.c .end = next->start - 1, start 295 tools/perf/util/block-range.c assert(iter.start->start == start && iter.start->is_target); start 27 tools/perf/util/block-range.h u64 start; start 47 tools/perf/util/block-range.h struct block_range *start; start 53 tools/perf/util/block-range.h return iter->start; start 58 tools/perf/util/block-range.h if (iter->start == iter->end) start 61 tools/perf/util/block-range.h iter->start = block_range__next(iter->start); start 67 tools/perf/util/block-range.h if (!iter->start || !iter->end) start 73 tools/perf/util/block-range.h extern struct block_range_iter block_range__create(u64 start, u64 end); start 1160 tools/perf/util/bpf-loader.c unsigned int start = array->ranges[i].start; start 1162 tools/perf/util/bpf-loader.c unsigned int idx = start + length - 1; start 1291 tools/perf/util/bpf-loader.c unsigned int start = op->k.array.ranges[i].start; start 1295 tools/perf/util/bpf-loader.c unsigned int idx = start + j; start 736 tools/perf/util/callchain.c match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start, start 737 tools/perf/util/callchain.c node->map, node->sym->start); start 919 tools/perf/util/callchain.c u64 start = cursor->pos; start 952 tools/perf/util/callchain.c matches = cursor->pos - start; start 956 tools/perf/util/callchain.c if (split_add_child(root, cursor, cnode, start, matches, start 115 tools/perf/util/comm.c comm->start = timestamp; start 137 tools/perf/util/comm.c comm->start = timestamp; start 13 tools/perf/util/comm.h u64 start; start 472 tools/perf/util/cpumap.c int i, cpu, start = -1; start 483 tools/perf/util/cpumap.c if (start == -1) { start 484 tools/perf/util/cpumap.c start = i; start 490 tools/perf/util/cpumap.c } else if (((i - start) != (cpu - map->map[start])) || last) { start 493 tools/perf/util/cpumap.c if (start == end) { start 496 tools/perf/util/cpumap.c map->map[start]); start 500 tools/perf/util/cpumap.c map->map[start], map->map[end]); start 503 tools/perf/util/cpumap.c start = i; start 55 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c u64 start, u64 end, start 60 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end, start 88 tools/perf/util/cs-etm-decoder/cs-etm-decoder.h u64 start, u64 end, start 923 tools/perf/util/data-convert-bt.c __NON_SAMPLE_SET_FIELD(mmap, u64_hex, start); start 929 tools/perf/util/data-convert-bt.c __NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start); start 1274 tools/perf/util/data-convert-bt.c __NON_SAMPLE_ADD_FIELD(u64_hex, start); start 1281 tools/perf/util/data-convert-bt.c __NON_SAMPLE_ADD_FIELD(u64_hex, start); start 202 tools/perf/util/db-export.c *offset = al->addr - al->sym->start; start 49 tools/perf/util/demangle-rust.c static bool 
is_prefixed_hash(const char *start); start 1038 tools/perf/util/dwarf-aux.c Dwarf_Addr start, end; start 1061 tools/perf/util/dwarf-aux.c &start, &end)) > 0) { start 1062 tools/perf/util/dwarf-aux.c start -= entry; start 1067 tools/perf/util/dwarf-aux.c name, start, end); start 1071 tools/perf/util/dwarf-aux.c start, end); start 1098 tools/perf/util/dwarf-aux.c Dwarf_Addr start, end; start 1119 tools/perf/util/dwarf-aux.c &start, &end, &op, &nops)) > 0) { start 1120 tools/perf/util/dwarf-aux.c if (start == 0) { start 1127 tools/perf/util/dwarf-aux.c start -= entry; start 1131 tools/perf/util/dwarf-aux.c name, start, end); start 1135 tools/perf/util/dwarf-aux.c start, end); start 88 tools/perf/util/event.c u64 start; start 92 tools/perf/util/event.c u64 start) start 104 tools/perf/util/event.c args->start = start; start 116 tools/perf/util/event.c *addr = args.start; start 258 tools/perf/util/event.c event->mmap.pid, event->mmap.tid, event->mmap.start, start 268 tools/perf/util/event.c event->mmap2.pid, event->mmap2.tid, event->mmap2.start, start 2 tools/perf/util/find-map.c static int find_map(void **start, void **end, const char *name) start 19 tools/perf/util/find-map.c start, end, &m)) start 252 tools/perf/util/intel-bts.c void *start; start 257 tools/perf/util/intel-bts.c start = intel_bts_find_overlap(a->data, a->size, b->data, b->size); start 258 tools/perf/util/intel-bts.c if (!start) start 260 tools/perf/util/intel-bts.c b->use_size = b->data + b->size - start; start 261 tools/perf/util/intel-bts.c b->use_data = start; start 837 tools/perf/util/intel-bts.c static void intel_bts_print_info(__u64 *arr, int start, int finish) start 844 tools/perf/util/intel-bts.c for (i = start; i <= finish; i++) start 2885 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c unsigned char *start; start 2888 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c start = buf_b + len_b - (rem_b - rem_a); start 2889 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c return adj_for_padding(start, buf_a, len_a); start 49 tools/perf/util/intel-pt.c u64 start; start 240 tools/perf/util/intel-pt.c void *start; start 242 tools/perf/util/intel-pt.c start = intel_pt_find_overlap(a->data, a->size, b->data, b->size, start 244 tools/perf/util/intel-pt.c if (!start) start 246 tools/perf/util/intel-pt.c b->use_size = b->data + b->size - start; start 247 tools/perf/util/intel-pt.c b->use_data = start; start 653 tools/perf/util/intel-pt.c if (filt->start) start 666 tools/perf/util/intel-pt.c filt->start ? "filter" : "stop", start 669 tools/perf/util/intel-pt.c if (filt->start) start 1027 tools/perf/util/intel-pt.c ptq->sel_timestamp = pt->time_ranges[0].start; start 2021 tools/perf/util/intel-pt.c struct symbol *sym, *start; start 2035 tools/perf/util/intel-pt.c start = dso__first_symbol(map->dso); start 2037 tools/perf/util/intel-pt.c for (sym = start; sym; sym = dso__next_symbol(sym)) { start 2040 tools/perf/util/intel-pt.c ip = map->unmap_ip(map, sym->start); start 2041 tools/perf/util/intel-pt.c if (ip >= map->start && ip < map->end) { start 2056 tools/perf/util/intel-pt.c for (sym = start; sym; sym = dso__next_symbol(sym)) { start 2058 tools/perf/util/intel-pt.c ip = map->unmap_ip(map, sym->start); start 2059 tools/perf/util/intel-pt.c if (ip >= map->start && ip < map->end) { start 2101 tools/perf/util/intel-pt.c ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start; start 3010 tools/perf/util/intel-pt.c u64 ts = p[i].start; start 3017 tools/perf/util/intel-pt.c r->start = ts ? 
intel_pt_tsc_start(ts, pt) : 0; start 3023 tools/perf/util/intel-pt.c i, r->start, r->end); start 3048 tools/perf/util/intel-pt.c static void intel_pt_print_info(__u64 *arr, int start, int finish) start 3055 tools/perf/util/intel-pt.c for (i = start; i <= finish; i++) start 458 tools/perf/util/jitdump.c event->mmap2.start = addr; start 549 tools/perf/util/jitdump.c event->mmap2.start = jr->move.new_code_addr; start 730 tools/perf/util/machine.c map->start = event->ksymbol.addr; start 731 tools/perf/util/machine.c map->end = map->start + event->ksymbol.len; start 735 tools/perf/util/machine.c sym = symbol__new(map->map_ip(map, map->start), start 770 tools/perf/util/machine.c struct map *machine__findnew_module_map(struct machine *machine, u64 start, start 788 tools/perf/util/machine.c map = map__new2(start, dso); start 907 tools/perf/util/machine.c u64 start; start 927 tools/perf/util/machine.c u64 *start, u64 *end) start 951 tools/perf/util/machine.c *start = addr; start 967 tools/perf/util/machine.c map = map__new2(xm->start, kernel); start 982 tools/perf/util/machine.c kmap->name, map->start, map->end); start 1005 tools/perf/util/machine.c return sym->start; start 1062 tools/perf/util/machine.c .start = va, start 1366 tools/perf/util/machine.c int __weak arch__fix_module_text_start(u64 *start __maybe_unused, start 1373 tools/perf/util/machine.c static int machine__create_module(void *arg, const char *name, u64 start, start 1379 tools/perf/util/machine.c if (arch__fix_module_text_start(&start, &size, name) < 0) start 1382 tools/perf/util/machine.c map = machine__findnew_module_map(machine, start, name); start 1385 tools/perf/util/machine.c map->end = start + size; start 1419 tools/perf/util/machine.c u64 start, u64 end) start 1421 tools/perf/util/machine.c machine->vmlinux_map->start = start; start 1427 tools/perf/util/machine.c if (start == 0 && end == 0) start 1432 tools/perf/util/machine.c u64 start, u64 end) start 1439 tools/perf/util/machine.c machine__set_kernel_mmap(machine, start, end); start 1450 tools/perf/util/machine.c u64 start = 0, end = ~0ULL; start 1469 tools/perf/util/machine.c if (!machine__get_running_kernel_start(machine, &name, &start, &end)) { start 1471 tools/perf/util/machine.c map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) { start 1481 tools/perf/util/machine.c machine__update_kernel_mmap(machine, start, end); start 1491 tools/perf/util/machine.c machine__set_kernel_mmap(machine, start, map->start); start 1524 tools/perf/util/machine.c .start = event->mmap.start, start 1525 tools/perf/util/machine.c .end = event->mmap.start + event->mmap.len, start 1558 tools/perf/util/machine.c map = machine__findnew_module_map(machine, event->mmap.start, start 1563 tools/perf/util/machine.c map->end = map->start + event->mmap.len; start 1620 tools/perf/util/machine.c machine__update_kernel_mmap(machine, event->mmap.start, start 1621 tools/perf/util/machine.c event->mmap.start + event->mmap.len); start 1672 tools/perf/util/machine.c map = map__new(machine, event->mmap2.start, start 1728 tools/perf/util/machine.c map = map__new(machine, event->mmap.start, start 2671 tools/perf/util/machine.c machine->kernel_start = map->start; start 2718 tools/perf/util/machine.c *addrp = map->unmap_ip(map, sym->start); start 224 tools/perf/util/machine.h struct map *machine__findnew_module_map(struct machine *machine, u64 start, start 226 tools/perf/util/machine.h int arch__fix_module_text_start(u64 *start, u64 *size, const char *name); start 270 tools/perf/util/machine.h u64 
start; start 133 tools/perf/util/map.c void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) start 135 tools/perf/util/map.c map->start = start; start 148 tools/perf/util/map.c struct map *map__new(struct machine *machine, u64 start, u64 len, start 205 tools/perf/util/map.c map__init(map, start, start + len, pgoff, dso); start 233 tools/perf/util/map.c struct map *map__new2(u64 start, struct dso *dso) start 241 tools/perf/util/map.c map__init(map, start, 0, 0, dso); start 313 tools/perf/util/map.c map->start = sym->start; start 407 tools/perf/util/map.c map->start, map->end, map->pgoff, map->dso->name); start 681 tools/perf/util/map.c u64 ip = map->unmap_ip(map, sym->start); start 683 tools/perf/util/map.c return ip >= map->start && ip < map->end; start 725 tools/perf/util/map.c if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { start 792 tools/perf/util/map.c if (pos->end > map->start) { start 794 tools/perf/util/map.c if (pos->start <= map->start) start 810 tools/perf/util/map.c if (pos->start >= map->end) start 830 tools/perf/util/map.c if (map->start > pos->start) { start 838 tools/perf/util/map.c before->end = map->start; start 853 tools/perf/util/map.c after->start = map->end; start 854 tools/perf/util/map.c after->pgoff += map->end - pos->start; start 915 tools/perf/util/map.c const u64 ip = map->start; start 921 tools/perf/util/map.c if (ip < m->start) start 987 tools/perf/util/map.c if (ip < m->start) start 27 tools/perf/util/map.h u64 start; start 57 tools/perf/util/map.h return ip - map->start + map->pgoff; start 62 tools/perf/util/map.h return ip + map->start - map->pgoff; start 72 tools/perf/util/map.h return map->end - map->start; start 112 tools/perf/util/map.h u64 start, u64 end, u64 pgoff, struct dso *dso); start 113 tools/perf/util/map.h struct map *map__new(struct machine *machine, u64 start, u64 len, start 117 tools/perf/util/map.h struct map *map__new2(u64 start, struct dso *dso); start 12 tools/perf/util/mem2node.c u64 start; start 27 tools/perf/util/mem2node.c if (entry->start < e->start) start 38 tools/perf/util/mem2node.c phys_entry__init(struct phys_entry *entry, u64 start, u64 bsize, u64 node) start 40 tools/perf/util/mem2node.c entry->start = start; start 41 tools/perf/util/mem2node.c entry->end = start + bsize; start 71 tools/perf/util/mem2node.c u64 start; start 76 tools/perf/util/mem2node.c start = bit * bsize; start 85 tools/perf/util/mem2node.c if ((prev->end == start) && start 92 tools/perf/util/mem2node.c phys_entry__init(&entries[j++], start, bsize, n->node); start 103 tools/perf/util/mem2node.c entries[i].node, entries[i].start, entries[i].end); start 126 tools/perf/util/mem2node.c if (addr < entry->start) start 100 tools/perf/util/mmap.c event = perf_mmap__read(map, &map->core.start, map->core.end); start 103 tools/perf/util/mmap.c map->core.prev = map->core.start; start 409 tools/perf/util/mmap.c static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end) start 412 tools/perf/util/mmap.c u64 evt_head = *start; start 415 tools/perf/util/mmap.c pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start); start 416 tools/perf/util/mmap.c pheader = (struct perf_event_header *)(buf + (*start & mask)); start 418 tools/perf/util/mmap.c if (evt_head - *start >= (unsigned int)size) { start 420 tools/perf/util/mmap.c if (evt_head - *start > (unsigned int)size) start 451 tools/perf/util/mmap.c md->core.start = md->core.overwrite ? 
head : old; start 454 tools/perf/util/mmap.c if ((md->core.end - md->core.start) < md->core.flush) start 457 tools/perf/util/mmap.c size = md->core.end - md->core.start; start 471 tools/perf/util/mmap.c if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end)) start 502 tools/perf/util/mmap.c size = md->core.end - md->core.start; start 504 tools/perf/util/mmap.c if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) { start 505 tools/perf/util/mmap.c buf = &data[md->core.start & md->core.mask]; start 506 tools/perf/util/mmap.c size = md->core.mask + 1 - (md->core.start & md->core.mask); start 507 tools/perf/util/mmap.c md->core.start += size; start 515 tools/perf/util/mmap.c buf = &data[md->core.start & md->core.mask]; start 516 tools/perf/util/mmap.c size = md->core.end - md->core.start; start 517 tools/perf/util/mmap.c md->core.start += size; start 86 tools/perf/util/parse-events.h unsigned int start; start 114 tools/perf/util/parse-events.y start: start 693 tools/perf/util/parse-events.y array.ranges[0].start = $1; start 706 tools/perf/util/parse-events.y array.ranges[0].start = $1; start 1364 tools/perf/util/pmu.c static void wordwrap(char *s, int start, int max, int corr) start 1366 tools/perf/util/pmu.c int column = start; start 1372 tools/perf/util/pmu.c if (column + wlen >= max && column > start) { start 1373 tools/perf/util/pmu.c printf("\n%*s", start, ""); start 1374 tools/perf/util/pmu.c column = start + corr; start 1376 tools/perf/util/pmu.c n = printf("%s%.*s", column > start ? " " : "", wlen, s); start 135 tools/perf/util/probe-event.c *addr = map->unmap_ip(map, sym->start) - start 137 tools/perf/util/probe-event.c ((reladdr) ? map->start : 0); start 374 tools/perf/util/probe-event.c address = sym->start; start 376 tools/perf/util/probe-event.c address = map->unmap_ip(map, sym->start) - map->reloc; start 425 tools/perf/util/probe-event.c .line = lr->start }; start 432 tools/perf/util/probe-event.c len = lr->end - lr->start; start 438 tools/perf/util/probe-event.c lr->start = result.line; start 440 tools/perf/util/probe-event.c lr->end = lr->start + len; start 636 tools/perf/util/probe-event.c tp->offset = addr - sym->start; start 984 tools/perf/util/probe-event.c lr->start - lr->offset); start 986 tools/perf/util/probe-event.c fprintf(stdout, "<%s:%d>\n", lr->path, lr->start); start 995 tools/perf/util/probe-event.c while (l < lr->start) { start 1199 tools/perf/util/probe-event.c const char *start = *ptr; start 1203 tools/perf/util/probe-event.c if (errno || *ptr == start) { start 1237 tools/perf/util/probe-event.c lr->start = 0; start 1244 tools/perf/util/probe-event.c err = parse_line_num(&range, &lr->start, "start line"); start 1256 tools/perf/util/probe-event.c lr->end += lr->start; start 1267 tools/perf/util/probe-event.c pr_debug("Line range is %d to %d\n", lr->start, lr->end); start 1270 tools/perf/util/probe-event.c if (lr->start > lr->end) { start 2125 tools/perf/util/probe-event.c pp->offset = addr - map->unmap_ip(map, sym->start); start 2338 tools/perf/util/probe-event.c unsigned long start; start 2384 tools/perf/util/probe-event.c if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) { start 2401 tools/perf/util/probe-event.c node->start, node->end, node->symbol); start 2418 tools/perf/util/probe-event.c if (node->start <= address && address < node->end) start 2953 tools/perf/util/probe-event.c if (pp->offset > sym->end - sym->start) { start 2960 tools/perf/util/probe-event.c tp->address = map->unmap_ip(map, 
sym->start) + pp->offset; start 108 tools/perf/util/probe-event.h int start; /* Start line number */ start 1767 tools/perf/util/probe-finder.c lf->lno_s = lr->offset + lr->start; start 1774 tools/perf/util/probe-finder.c lr->start = lf->lno_s; start 1840 tools/perf/util/probe-finder.c lf.lno_s = lr->start; start 131 tools/perf/util/python.c member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"), start 147 tools/perf/util/python.c pevent->event.mmap.start, pevent->event.mmap.len, start 303 tools/perf/util/scripting-engines/trace-event-perl.c if (!hv_stores(sym, "start", newSVuv(node->sym->start)) || start 436 tools/perf/util/scripting-engines/trace-event-python.c PyLong_FromUnsignedLongLong(node->sym->start)); start 526 tools/perf/util/scripting-engines/trace-event-python.c offset = al->addr - sym->start; start 528 tools/perf/util/scripting-engines/trace-event-python.c offset = al->addr - al->map->start - sym->start; start 1026 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, 3, comm->start); start 1090 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, 2, sym->start); start 568 tools/perf/util/session.c event->mmap.start = bswap_64(event->mmap.start); start 585 tools/perf/util/session.c event->mmap2.start = bswap_64(event->mmap2.start); start 250 tools/perf/util/sort.c if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) start 254 tools/perf/util/sort.c if (sym_l->start != sym_r->start) start 255 tools/perf/util/sort.c return (int64_t)(sym_r->start - sym_l->start); start 307 tools/perf/util/sort.c ip - map->unmap_ip(map, sym->start)); start 108 tools/perf/util/srcline.c inline_sym = symbol__new(base_sym ? base_sym->start : 0, start 109 tools/perf/util/srcline.c base_sym ? (base_sym->end - base_sym->start) : 0, start 561 tools/perf/util/srcline.c ip - sym->start) < 0) start 90 tools/perf/util/stat.c ID(TRANSACTION_START, cpu/tx-start/), start 91 tools/perf/util/stat.c ID(ELISION_START, cpu/el-start/), start 85 tools/perf/util/svghelper.c void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) start 94 tools/perf/util/svghelper.c first_time = start; start 153 tools/perf/util/svghelper.c void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) start 155 tools/perf/util/svghelper.c double w = time2pixels(end) - time2pixels(start); start 164 tools/perf/util/svghelper.c time2pixels(start), start 172 tools/perf/util/svghelper.c void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) start 174 tools/perf/util/svghelper.c double w = time2pixels(end) - time2pixels(start); start 183 tools/perf/util/svghelper.c time2pixels(start), start 191 tools/perf/util/svghelper.c void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) start 193 tools/perf/util/svghelper.c double w = time2pixels(end) - time2pixels(start); start 202 tools/perf/util/svghelper.c time2pixels(start), start 210 tools/perf/util/svghelper.c void svg_box(int Yslot, u64 start, u64 end, const char *type) start 216 tools/perf/util/svghelper.c time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type); start 220 tools/perf/util/svghelper.c void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) start 227 tools/perf/util/svghelper.c time_to_string(end - start)); start 230 tools/perf/util/svghelper.c svg_box(Yslot, start, end, "blocked"); start 234 
tools/perf/util/svghelper.c void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) start 242 tools/perf/util/svghelper.c if (svg_highlight && end - start > svg_highlight) start 249 tools/perf/util/svghelper.c cpu, time_to_string(end - start)); start 253 tools/perf/util/svghelper.c time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, start 256 tools/perf/util/svghelper.c text_size = (time2pixels(end)-time2pixels(start)); start 265 tools/perf/util/svghelper.c time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1); start 288 tools/perf/util/svghelper.c void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) start 299 tools/perf/util/svghelper.c if (end-start > 10 * NSEC_PER_MSEC) /* 10 msec */ start 302 tools/perf/util/svghelper.c text = time_to_string(end-start); start 304 tools/perf/util/svghelper.c font_size = 1.0 * (time2pixels(end)-time2pixels(start)); start 311 tools/perf/util/svghelper.c fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT); start 312 tools/perf/util/svghelper.c fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start)); start 316 tools/perf/util/svghelper.c time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style); start 382 tools/perf/util/svghelper.c void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace) start 390 tools/perf/util/svghelper.c if (svg_highlight && end - start >= svg_highlight) start 397 tools/perf/util/svghelper.c fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), cpu2y(cpu)); start 398 tools/perf/util/svghelper.c fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start)); start 402 tools/perf/util/svghelper.c time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type); start 403 tools/perf/util/svghelper.c width = time2pixels(end)-time2pixels(start); start 416 tools/perf/util/svghelper.c void svg_cstate(int cpu, u64 start, u64 end, int type) start 433 tools/perf/util/svghelper.c time2pixels(start), time2pixels(end)-time2pixels(start), start 436 tools/perf/util/svghelper.c width = (time2pixels(end)-time2pixels(start))/2.0; start 444 tools/perf/util/svghelper.c time2pixels(start), cpu2y(cpu)+width, width, type); start 473 tools/perf/util/svghelper.c void svg_pstate(int cpu, u64 start, u64 end, u64 freq) start 486 tools/perf/util/svghelper.c time2pixels(start), time2pixels(end), height, height); start 488 tools/perf/util/svghelper.c time2pixels(start), height+0.9, HzToHuman(freq)); start 494 tools/perf/util/svghelper.c void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace) start 514 tools/perf/util/svghelper.c time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32); start 517 tools/perf/util/svghelper.c time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2); start 521 tools/perf/util/svghelper.c time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row2 * SLOT_MULT); start 524 tools/perf/util/svghelper.c time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1); start 529 tools/perf/util/svghelper.c time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32); start 532 tools/perf/util/svghelper.c time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + 
SLOT_MULT/48, desc1); start 536 tools/perf/util/svghelper.c time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row1 * SLOT_MULT); start 539 tools/perf/util/svghelper.c time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2); start 547 tools/perf/util/svghelper.c time2pixels(start), height); start 552 tools/perf/util/svghelper.c void svg_wakeline(u64 start, int row1, int row2, const char *backtrace) start 567 tools/perf/util/svghelper.c time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT); start 570 tools/perf/util/svghelper.c time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT); start 576 tools/perf/util/svghelper.c time2pixels(start), height); start 581 tools/perf/util/svghelper.c void svg_interrupt(u64 start, int row, const char *backtrace) start 594 tools/perf/util/svghelper.c time2pixels(start), row * SLOT_MULT); start 596 tools/perf/util/svghelper.c time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT); start 601 tools/perf/util/svghelper.c void svg_text(int Yslot, u64 start, const char *text) start 607 tools/perf/util/svghelper.c time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text); start 9 tools/perf/util/svghelper.h void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end); start 10 tools/perf/util/svghelper.h void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges); start 11 tools/perf/util/svghelper.h void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges); start 12 tools/perf/util/svghelper.h void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges); start 13 tools/perf/util/svghelper.h void svg_box(int Yslot, u64 start, u64 end, const char *type); start 14 tools/perf/util/svghelper.h void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace); start 15 tools/perf/util/svghelper.h void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace); start 16 tools/perf/util/svghelper.h void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace); start 20 tools/perf/util/svghelper.h void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace); start 21 tools/perf/util/svghelper.h void svg_cstate(int cpu, u64 start, u64 end, int type); start 22 tools/perf/util/svghelper.h void svg_pstate(int cpu, u64 start, u64 end, u64 freq); start 28 tools/perf/util/svghelper.h void svg_wakeline(u64 start, int row1, int row2, const char *backtrace); start 29 tools/perf/util/svghelper.h void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace); start 30 tools/perf/util/svghelper.h void svg_interrupt(u64 start, int row, const char *backtrace); start 31 tools/perf/util/svghelper.h void svg_text(int Yslot, u64 start, const char *text); start 871 tools/perf/util/symbol-elf.c map->start = shdr->sh_addr + ref_reloc(kmap); start 872 tools/perf/util/symbol-elf.c map->end = map->start + shdr->sh_size; start 907 tools/perf/util/symbol-elf.c u64 start = sym->st_value; start 910 tools/perf/util/symbol-elf.c start += map->start + shdr->sh_offset; start 918 tools/perf/util/symbol-elf.c curr_map = map__new2(start, curr_dso); start 924 tools/perf/util/symbol-elf.c curr_map->start = shdr->sh_addr + ref_reloc(kmap); start 925 tools/perf/util/symbol-elf.c curr_map->end = curr_map->start + shdr->sh_size; start 1055 
tools/perf/util/symbol-elf.c map->reloc = map->start - dso->text_offset; start 1520 tools/perf/util/symbol-elf.c u64 start) start 1528 tools/perf/util/symbol-elf.c if (start > kci->last_module_symbol) start 1529 tools/perf/util/symbol-elf.c kci->last_module_symbol = start; start 1533 tools/perf/util/symbol-elf.c if (!kci->first_symbol || start < kci->first_symbol) start 1534 tools/perf/util/symbol-elf.c kci->first_symbol = start; start 1536 tools/perf/util/symbol-elf.c if (!kci->last_symbol || start > kci->last_symbol) start 1537 tools/perf/util/symbol-elf.c kci->last_symbol = start; start 1540 tools/perf/util/symbol-elf.c kci->stext = start; start 1545 tools/perf/util/symbol-elf.c kci->etext = start; start 1549 tools/perf/util/symbol-elf.c if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start)) start 1574 tools/perf/util/symbol-elf.c u64 start, u64 size __maybe_unused) start 1578 tools/perf/util/symbol-elf.c if (!kci->first_module || start < kci->first_module) start 1579 tools/perf/util/symbol-elf.c kci->first_module = start; start 1601 tools/perf/util/symbol-elf.c static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end, start 1606 tools/perf/util/symbol-elf.c if (s < start || s >= end) start 1609 tools/perf/util/symbol-elf.c offset = (s - start) + pgoff; start 1615 tools/perf/util/symbol-elf.c static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) start 1618 tools/perf/util/symbol-elf.c u64 end = start + len; start 1621 tools/perf/util/symbol-elf.c if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext)) start 1624 tools/perf/util/symbol-elf.c if (kcore_copy__map(kci, start, end, pgoff, kci->first_module, start 1631 tools/perf/util/symbol-elf.c if (kcore_copy__map(kci, start, end, pgoff, s, s + len)) start 2280 tools/perf/util/symbol-elf.c int sdt_notes__get_count(struct list_head *start) start 2285 tools/perf/util/symbol-elf.c list_for_each_entry(sdt_ptr, start, note_list) start 105 tools/perf/util/symbol.c p->end = c->start; start 143 tools/perf/util/symbol.c a = syma->end - syma->start; start 144 tools/perf/util/symbol.c b = symb->end - symb->start; start 204 tools/perf/util/symbol.c if (curr->start != next->start) start 233 tools/perf/util/symbol.c if (prev->end == prev->start && prev->end != curr->start) start 238 tools/perf/util/symbol.c if (curr->end == curr->start) start 239 tools/perf/util/symbol.c curr->end = roundup(curr->start, 4096) + 4096; start 255 tools/perf/util/symbol.c curr->end = next->start; start 270 tools/perf/util/symbol.c struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name) start 286 tools/perf/util/symbol.c sym->start = start; start 287 tools/perf/util/symbol.c sym->end = len ? 
start + len : start; start 293 tools/perf/util/symbol.c __func__, name, start, sym->end); start 322 tools/perf/util/symbol.c const u64 ip = sym->start; start 340 tools/perf/util/symbol.c if (ip < s->start) start 368 tools/perf/util/symbol.c if (ip < s->start) start 370 tools/perf/util/symbol.c else if (ip > s->end || (ip == s->end && ip != s->start)) start 515 tools/perf/util/symbol.c if (dso->last_find_result.addr >= sym->start && start 517 tools/perf/util/symbol.c sym->start == sym->end)) { start 576 tools/perf/util/symbol.c u64 start, u64 size)) start 589 tools/perf/util/symbol.c u64 start, size; start 612 tools/perf/util/symbol.c hex2u64(sep + 1, &start); start 626 tools/perf/util/symbol.c err = process_module(arg, name, start, size); start 669 tools/perf/util/symbol.c char type, u64 start) start 683 tools/perf/util/symbol.c sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name); start 731 tools/perf/util/symbol.c curr_map = map_groups__find(kmaps, pos->start); start 738 tools/perf/util/symbol.c pos->start -= curr_map->start - curr_map->pgoff; start 742 tools/perf/util/symbol.c pos->end -= curr_map->start - curr_map->pgoff; start 822 tools/perf/util/symbol.c pos->start = curr_map->map_ip(curr_map, pos->start); start 840 tools/perf/util/symbol.c pos->start -= delta; start 864 tools/perf/util/symbol.c curr_map = map__new2(pos->start, ndso); start 875 tools/perf/util/symbol.c pos->start -= delta; start 922 tools/perf/util/symbol.c u64 start; start 979 tools/perf/util/symbol.c static int __read_proc_modules(void *arg, const char *name, u64 start, start 990 tools/perf/util/symbol.c mi->start = start; start 1038 tools/perf/util/symbol.c if (from_m->start != to_m->start || start 1084 tools/perf/util/symbol.c if (!mi || mi->start != old_map->start) { start 1149 tools/perf/util/symbol.c u64 start; start 1152 tools/perf/util/symbol.c kmap->ref_reloc_sym->name, &start)) start 1154 tools/perf/util/symbol.c if (start != kmap->ref_reloc_sym->addr) start 1166 tools/perf/util/symbol.c static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) start 1171 tools/perf/util/symbol.c map = map__new2(start, md->dso); start 1175 tools/perf/util/symbol.c map->end = map->start + len; start 1196 tools/perf/util/symbol.c if (new_map->end < old_map->start || start 1197 tools/perf/util/symbol.c new_map->start >= old_map->end) start 1200 tools/perf/util/symbol.c if (new_map->start < old_map->start) { start 1210 tools/perf/util/symbol.c new_map->end = old_map->start; start 1221 tools/perf/util/symbol.c m->end = old_map->start; start 1223 tools/perf/util/symbol.c new_map->start = old_map->end; start 1243 tools/perf/util/symbol.c new_map->start = old_map->end; start 1332 tools/perf/util/symbol.c if (stext >= new_map->start && stext < new_map->end) { start 1347 tools/perf/util/symbol.c map->start = new_map->start; start 1479 tools/perf/util/symbol.c u64 start, size; start 1492 tools/perf/util/symbol.c len = hex2u64(line, &start); start 1504 tools/perf/util/symbol.c sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len); start 47 tools/perf/util/symbol.h u64 start; start 76 tools/perf/util/symbol.h return sym->end - sym->start; start 110 tools/perf/util/symbol.h u64 start; start 159 tools/perf/util/symbol.h u64 start, u64 size)); start 169 tools/perf/util/symbol.h struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name); start 201 tools/perf/util/symbol.h typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); start 265 
tools/perf/util/symbol.h int sdt_notes__get_count(struct list_head *start); start 13 tools/perf/util/symbol_fprintf.c sym->start, sym->end, start 31 tools/perf/util/symbol_fprintf.c offset = al->addr - sym->start; start 33 tools/perf/util/symbol_fprintf.c offset = al->addr - al->map->start - sym->start; start 336 tools/perf/util/synthetic-events.c &event->mmap2.start, &event->mmap2.len, prot, start 395 tools/perf/util/synthetic-events.c event->mmap2.len -= event->mmap.start; start 453 tools/perf/util/synthetic-events.c event->mmap.start = pos->start; start 454 tools/perf/util/synthetic-events.c event->mmap.len = pos->end - pos->start; start 641 tools/perf/util/synthetic-events.c int start, start 669 tools/perf/util/synthetic-events.c for (i = start; i < start + num; i++) { start 705 tools/perf/util/synthetic-events.c int start; start 715 tools/perf/util/synthetic-events.c args->start, args->num); start 777 tools/perf/util/synthetic-events.c args[i].start = i * args[i].num; start 780 tools/perf/util/synthetic-events.c base = args[i-1].start + args[i-1].num; start 783 tools/perf/util/synthetic-events.c args[j].start = base + (j - i) * args[i].num; start 859 tools/perf/util/synthetic-events.c event->mmap.start = map->start; start 860 tools/perf/util/synthetic-events.c event->mmap.len = map->end - event->mmap.start; start 821 tools/perf/util/thread-stack.c if (sym && sym == tsym && to_al->addr != tsym->start) { start 922 tools/perf/util/thread-stack.c to_al->addr != to_al->sym->start) start 959 tools/perf/util/thread-stack.c to_al->addr == to_al->sym->start) { start 231 tools/perf/util/thread.c if (second_last && !last->start && thread->pid_ == thread->tid) start 55 tools/perf/util/time-utils.c (parse_nsec_time(start_str, &ptime->start) != 0)) { start 67 tools/perf/util/time-utils.c static int split_start_end(char **start, char **end, const char *ostr, char ch) start 88 tools/perf/util/time-utils.c *start = start_str; start 103 tools/perf/util/time-utils.c ptime->start = 0; start 111 tools/perf/util/time-utils.c if (rc == 0 && ptime->end && ptime->end < ptime->start) start 114 tools/perf/util/time-utils.c pr_debug("start time %" PRIu64 ", ", ptime->start); start 167 tools/perf/util/time-utils.c if (ptime[i].end >= ptime[i + 1].start) { start 200 tools/perf/util/time-utils.c double end_pcnt, u64 start, u64 end) start 202 tools/perf/util/time-utils.c u64 total = end - start; start 209 tools/perf/util/time-utils.c ptime->start = start + round(start_pcnt * total); start 210 tools/perf/util/time-utils.c ptime->end = start + round(end_pcnt * total); start 212 tools/perf/util/time-utils.c if (ptime->end > ptime->start && ptime->end != end) start 219 tools/perf/util/time-utils.c u64 start, u64 end) start 250 tools/perf/util/time-utils.c return set_percent_time(ptime, start_pcnt, end_pcnt, start, end); start 254 tools/perf/util/time-utils.c u64 start, u64 end) start 276 tools/perf/util/time-utils.c return set_percent_time(ptime, start_pcnt, end_pcnt, start, end); start 280 tools/perf/util/time-utils.c u64 start, u64 end); start 283 tools/perf/util/time-utils.c const char *ostr, u64 start, u64 end, start 306 tools/perf/util/time-utils.c ret = (func)(p1, &ptime_buf[i], start, end); start 312 tools/perf/util/time-utils.c pr_debug("start time %d: %" PRIu64 ", ", i, ptime_buf[i].start); start 328 tools/perf/util/time-utils.c const char *ostr, u64 start, u64 end, char *c) start 350 tools/perf/util/time-utils.c ret = percent_slash_split(str, ptime_buf, start, end); start 359 tools/perf/util/time-utils.c const 
char *ostr, u64 start, u64 end) start 374 tools/perf/util/time-utils.c return percent_comma_split(ptime_buf, num, ostr, start, start 380 tools/perf/util/time-utils.c return percent_comma_split(ptime_buf, num, ostr, start, start 386 tools/perf/util/time-utils.c return one_percent_convert(ptime_buf, ostr, start, end, c); start 426 tools/perf/util/time-utils.c if ((ptime->start && timestamp < ptime->start) || start 452 tools/perf/util/time-utils.c if (timestamp >= ptime->start && start 502 tools/perf/util/time-utils.c ptime_range[i].start += session->evlist->first_sample_time; start 10 tools/perf/util/time-utils.h u64 start, end; start 18 tools/perf/util/time-utils.h const char *ostr, u64 start, u64 end); start 51 tools/perf/util/unwind-libdw.c if (s != al->map->start - al->map->pgoff) start 57 tools/perf/util/unwind-libdw.c (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff, start 129 tools/perf/util/unwind-libdw.c u64 start, end; start 133 tools/perf/util/unwind-libdw.c ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); start 137 tools/perf/util/unwind-libdw.c end = start + stack->size; start 143 tools/perf/util/unwind-libdw.c if (addr < start || addr + sizeof(Dwarf_Word) > end) { start 148 tools/perf/util/unwind-libdw.c addr, start, end); start 154 tools/perf/util/unwind-libdw.c offset = addr - start; start 57 tools/perf/util/unwind-libunwind-local.c const char *obj_name, unw_word_t start, start 394 tools/perf/util/unwind-libunwind-local.c di.start_ip = map->start; start 396 tools/perf/util/unwind-libunwind-local.c di.u.rti.segbase = map->start + segbase - map->pgoff; start 397 tools/perf/util/unwind-libunwind-local.c di.u.rti.table_data = map->start + table_data - map->pgoff; start 410 tools/perf/util/unwind-libunwind-local.c unw_word_t base = is_exec ? 
0 : map->start; start 420 tools/perf/util/unwind-libunwind-local.c map->start, map->end)) start 491 tools/perf/util/unwind-libunwind-local.c u64 start, end; start 501 tools/perf/util/unwind-libunwind-local.c ret = perf_reg_value(&start, &ui->sample->user_regs, start 506 tools/perf/util/unwind-libunwind-local.c end = start + stack->size; start 512 tools/perf/util/unwind-libunwind-local.c if (addr < start || addr + sizeof(unw_word_t) >= end) { start 517 tools/perf/util/unwind-libunwind-local.c (void *) (uintptr_t) addr, start, end); start 524 tools/perf/util/unwind-libunwind-local.c offset = addr - start; start 75 tools/perf/util/vdso.c void *start, *end; start 82 tools/perf/util/vdso.c if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME)) start 85 tools/perf/util/vdso.c size = end - start; start 87 tools/perf/util/vdso.c buf = memdup(start, size); start 327 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c .start = amd_fam14h_start, start 206 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c .start = cpuidle_start, start 53 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c long long timespec_diff_us(struct timespec start, struct timespec end) start 56 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c if ((end.tv_nsec - start.tv_nsec) < 0) { start 57 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c temp.tv_sec = end.tv_sec - start.tv_sec - 1; start 58 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; start 60 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c temp.tv_sec = end.tv_sec - start.tv_sec; start 61 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c temp.tv_nsec = end.tv_nsec - start.tv_nsec; start 287 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c struct timespec start, end; start 290 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c clock_gettime(CLOCK_REALTIME, &start); start 293 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c monitors[num]->start(); start 316 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c timediff = timespec_diff_us(start, end); start 336 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c monitors[num]->start(); start 58 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h int (*start) (void); start 66 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h extern long long timespec_diff_us(struct timespec start, struct timespec end); start 186 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c .start = hsw_ext_start, start 332 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c .start = mperf_start, start 207 tools/power/cpupower/utils/idle_monitor/nhm_idle.c .start = nhm_start, start 191 tools/power/cpupower/utils/idle_monitor/snb_idle.c .start = snb_start, start 1342 tools/power/x86/intel-speed-select/isst-config.c unsigned int start, end; start 1351 tools/power/x86/intel-speed-select/isst-config.c start = strtoul(next, &next, 10); start 1354 tools/power/x86/intel-speed-select/isst-config.c target_cpus[max_target_cpus++] = start; start 1375 tools/power/x86/intel-speed-select/isst-config.c if (end <= start) start 1378 tools/power/x86/intel-speed-select/isst-config.c while (++start <= end) { start 1380 tools/power/x86/intel-speed-select/isst-config.c target_cpus[max_target_cpus++] = start; start 1404 tools/power/x86/intel-speed-select/isst-config.c static void parse_cmd_args(int argc, int start, char **argv) start 1426 
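The cpupower-monitor.c entries above (lines 53-61 of that file) show the classic struct timespec subtraction with a nanosecond borrow. The following is a standalone C sketch of that pattern, not a copy of the tool's code; the final microsecond conversion and the sample values in main() are assumptions added for illustration.

/*
 * Standalone sketch of the timespec-difference pattern shown in the
 * cpupower-monitor.c snippets above: borrow one second when end's
 * nanosecond field is smaller than start's, then convert the result
 * to microseconds.  The conversion step and the values in main() are
 * illustrative assumptions, not the tool's actual code.
 */
#include <stdio.h>
#include <time.h>

static long long timespec_diff_us(struct timespec start, struct timespec end)
{
	struct timespec temp;

	if ((end.tv_nsec - start.tv_nsec) < 0) {
		temp.tv_sec = end.tv_sec - start.tv_sec - 1;
		temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
	} else {
		temp.tv_sec = end.tv_sec - start.tv_sec;
		temp.tv_nsec = end.tv_nsec - start.tv_nsec;
	}
	/* Assumed conversion: whole seconds plus nanoseconds, in microseconds. */
	return temp.tv_sec * 1000000LL + temp.tv_nsec / 1000;
}

int main(void)
{
	struct timespec a = { .tv_sec = 1, .tv_nsec = 900000000 };
	struct timespec b = { .tv_sec = 3, .tv_nsec = 100000000 };

	/* 3.1s - 1.9s = 1.2s, so this prints 1200000. */
	printf("%lld\n", timespec_diff_us(a, b));
	return 0;
}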
tools/power/x86/intel-speed-select/isst-config.c option_index = start; start 1428 tools/power/x86/intel-speed-select/isst-config.c optind = start + 1; start 5580 tools/power/x86/turbostat/turbostat.c unsigned int start, end; start 5612 tools/power/x86/turbostat/turbostat.c start = strtoul(next, &next, 10); start 5614 tools/power/x86/turbostat/turbostat.c if (start >= CPU_SUBSET_MAXCPUS) start 5616 tools/power/x86/turbostat/turbostat.c CPU_SET_S(start, cpu_subset_size, cpu_subset); start 5637 tools/power/x86/turbostat/turbostat.c if (end <= start) start 5640 tools/power/x86/turbostat/turbostat.c while (++start <= end) { start 5641 tools/power/x86/turbostat/turbostat.c if (start >= CPU_SUBSET_MAXCPUS) start 5643 tools/power/x86/turbostat/turbostat.c CPU_SET_S(start, cpu_subset_size, cpu_subset); start 15 tools/testing/nvdimm/dax-dev.c addr = pgoff * PAGE_SIZE + res->start; start 16 tools/testing/nvdimm/dax-dev.c if (addr >= res->start && addr <= res->end) { start 72 tools/testing/nvdimm/test/iomap.c - nfit_res->res.start; start 83 tools/testing/nvdimm/test/iomap.c - nfit_res->res.start; start 94 tools/testing/nvdimm/test/iomap.c return nfit_res->buf + offset - nfit_res->res.start; start 129 tools/testing/nvdimm/test/iomap.c resource_size_t offset = pgmap->res.start; start 155 tools/testing/nvdimm/test/iomap.c return nfit_res->buf + offset - nfit_res->res.start; start 175 tools/testing/nvdimm/test/iomap.c return nfit_res->buf + offset - nfit_res->res.start; start 222 tools/testing/nvdimm/test/iomap.c struct resource *parent, resource_size_t start, start 229 tools/testing/nvdimm/test/iomap.c WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start, start 236 tools/testing/nvdimm/test/iomap.c resource_size_t start = *((resource_size_t *) match_data); start 238 tools/testing/nvdimm/test/iomap.c return res->start == start; start 242 tools/testing/nvdimm/test/iomap.c struct resource *parent, resource_size_t start, start 246 tools/testing/nvdimm/test/iomap.c struct nfit_test_resource *nfit_res = get_nfit_res(start); start 254 tools/testing/nvdimm/test/iomap.c &start); start 260 tools/testing/nvdimm/test/iomap.c if (req->res.start == start) { start 269 tools/testing/nvdimm/test/iomap.c __func__, start, n, res); start 279 tools/testing/nvdimm/test/iomap.c struct resource *parent, resource_size_t start, start 285 tools/testing/nvdimm/test/iomap.c nfit_res = get_nfit_res(start); start 290 tools/testing/nvdimm/test/iomap.c if (start + n > nfit_res->res.start start 293 tools/testing/nvdimm/test/iomap.c __func__, start, n, start 300 tools/testing/nvdimm/test/iomap.c if (start == req->res.start) { start 317 tools/testing/nvdimm/test/iomap.c res->start = start; start 318 tools/testing/nvdimm/test/iomap.c res->end = start + n - 1; start 343 tools/testing/nvdimm/test/iomap.c return __devm_request_region(dev, parent, start, n, name); start 344 tools/testing/nvdimm/test/iomap.c return __request_region(parent, start, n, name, flags); start 348 tools/testing/nvdimm/test/iomap.c resource_size_t start, resource_size_t n, const char *name, start 351 tools/testing/nvdimm/test/iomap.c return nfit_test_request_region(NULL, parent, start, n, name, flags); start 357 tools/testing/nvdimm/test/iomap.c if (get_nfit_res(res->start)) start 365 tools/testing/nvdimm/test/iomap.c if (get_nfit_res(res->start)) start 372 tools/testing/nvdimm/test/iomap.c struct resource *parent, resource_size_t start, start 377 tools/testing/nvdimm/test/iomap.c return nfit_test_request_region(dev, parent, start, n, name, 0); start 381 
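The isst-config.c and turbostat.c entries above show the usual strtoul()-based parsing of CPU lists such as "0,2,4-6", including the `while (++start <= end)` expansion of dash ranges and a bound check against CPU_SUBSET_MAXCPUS. Below is a self-contained sketch of that parsing pattern under assumed names (parse_cpu_list(), MAX_CPUS, a plain int output array); the real tools store the result in their own structures.

/*
 * Minimal sketch of the "0,2,4-6" CPU-list parsing pattern visible in the
 * isst-config.c and turbostat.c snippets above.  Function name, output
 * array and error handling are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 64

static int parse_cpu_list(const char *list, int *cpus, int max)
{
	char *next;
	int count = 0;

	while (*list) {
		unsigned long start = strtoul(list, &next, 10);
		unsigned long end = start;

		if (next == list)
			return -1;              /* not a number */
		if (*next == '-') {             /* dash range such as "4-6" */
			list = next + 1;
			end = strtoul(list, &next, 10);
			if (next == list || end < start)
				return -1;
		}
		for (unsigned long cpu = start; cpu <= end && count < max; cpu++)
			cpus[count++] = (int)cpu;
		if (*next == ',')
			next++;
		list = next;
	}
	return count;
}

int main(void)
{
	int cpus[MAX_CPUS];
	int n = parse_cpu_list("0,2,4-6", cpus, MAX_CPUS);

	for (int i = 0; i < n; i++)
		printf("%d ", cpus[i]);         /* prints: 0 2 4 5 6 */
	printf("\n");
	return 0;
}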
tools/testing/nvdimm/test/iomap.c void __wrap___release_region(struct resource *parent, resource_size_t start,
start 384 tools/testing/nvdimm/test/iomap.c if (!nfit_test_release_region(NULL, parent, start, n))
start 385 tools/testing/nvdimm/test/iomap.c __release_region(parent, start, n);
start 390 tools/testing/nvdimm/test/iomap.c resource_size_t start, resource_size_t n)
start 392 tools/testing/nvdimm/test/iomap.c if (!nfit_test_release_region(dev, parent, start, n))
start 393 tools/testing/nvdimm/test/iomap.c __devm_release_region(dev, parent, start, n);
start 545 tools/testing/nvdimm/test/nfit.c u64 be_end = be->start + be->length - 1;
start 549 tools/testing/nvdimm/test/nfit.c if (be_end < addr || be->start > end)
start 552 tools/testing/nvdimm/test/nfit.c rstart = (be->start < addr) ? addr : be->start;
start 889 tools/testing/nvdimm/test/nfit.c err_stat->record[i].err_inj_stat_spa_range_base = be->start;
start 1422 tools/testing/nvdimm/test/nfit.c gen_pool_free(nfit_pool, nfit_res->res.start,
start 1445 tools/testing/nvdimm/test/nfit.c nfit_res->res.start = *dma;
start 1491 tools/testing/nvdimm/test/nfit.c if (addr >= n->res.start && (addr < n->res.start
start 18 tools/testing/radix-tree/benchmark.c struct timespec start, finish;
start 26 tools/testing/radix-tree/benchmark.c clock_gettime(CLOCK_MONOTONIC, &start);
start 38 tools/testing/radix-tree/benchmark.c nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
start 39 tools/testing/radix-tree/benchmark.c (finish.tv_nsec - start.tv_nsec);
start 55 tools/testing/radix-tree/benchmark.c struct timespec start, finish;
start 59 tools/testing/radix-tree/benchmark.c clock_gettime(CLOCK_MONOTONIC, &start);
start 66 tools/testing/radix-tree/benchmark.c nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
start 67 tools/testing/radix-tree/benchmark.c (finish.tv_nsec - start.tv_nsec);
start 76 tools/testing/radix-tree/benchmark.c struct timespec start, finish;
start 80 tools/testing/radix-tree/benchmark.c clock_gettime(CLOCK_MONOTONIC, &start);
start 87 tools/testing/radix-tree/benchmark.c nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
start 88 tools/testing/radix-tree/benchmark.c (finish.tv_nsec - start.tv_nsec);
start 97 tools/testing/radix-tree/benchmark.c struct timespec start, finish;
start 101 tools/testing/radix-tree/benchmark.c clock_gettime(CLOCK_MONOTONIC, &start);
start 108 tools/testing/radix-tree/benchmark.c nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
start 109 tools/testing/radix-tree/benchmark.c (finish.tv_nsec - start.tv_nsec);
start 5 tools/testing/radix-tree/bitmap.c void bitmap_clear(unsigned long *map, unsigned int start, int len)
start 7 tools/testing/radix-tree/bitmap.c unsigned long *p = map + BIT_WORD(start);
start 8 tools/testing/radix-tree/bitmap.c const unsigned int size = start + len;
start 9 tools/testing/radix-tree/bitmap.c int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
start 10 tools/testing/radix-tree/bitmap.c unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
start 286 tools/testing/radix-tree/idr-test.c time_t start = time(NULL);
start 293 tools/testing/radix-tree/idr-test.c } while (time(NULL) < start + 10);
start 302 tools/testing/radix-tree/idr-test.c time_t start = time(NULL);
start 313 tools/testing/radix-tree/idr-test.c } while (time(NULL) < start + 11);
start 50 tools/testing/radix-tree/main.c unsigned long start;
start 53 tools/testing/radix-tree/main.c start = 0;
start 58 tools/testing/radix-tree/main.c __gang_check(start, rand() % 113 + 1, rand() % 71,
start 60 tools/testing/radix-tree/main.c old_start = start;
start 61 tools/testing/radix-tree/main.c start += rand() % 1000000;
start 62 tools/testing/radix-tree/main.c start %= 1ULL << 33;
start 63 tools/testing/radix-tree/main.c if (start < old_start)
start 126 tools/testing/radix-tree/main.c void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsigned long end, unsigned long *idx, int count, int fromtag, int totag)
start 133 tools/testing/radix-tree/main.c if (idx[i] < start || idx[i] > end) {
start 135 tools/testing/radix-tree/main.c printv(2, "%lu-%lu: %lu, tags %d-%d\n", start,
start 145 tools/testing/radix-tree/main.c printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, end,
start 160 tools/testing/radix-tree/main.c unsigned long start, end, count = 0, tagged, cur, tmp;
start 164 tools/testing/radix-tree/main.c start = rand();
start 166 tools/testing/radix-tree/main.c if (start > end && (rand() % 10)) {
start 167 tools/testing/radix-tree/main.c cur = start;
start 168 tools/testing/radix-tree/main.c start = end;
start 175 tools/testing/radix-tree/main.c item_insert(&tree, start);
start 177 tools/testing/radix-tree/main.c if (start <= end)
start 179 tools/testing/radix-tree/main.c item_tag_set(&tree, start, 0);
start 183 tools/testing/radix-tree/main.c item_insert(&tree, start-1);
start 185 tools/testing/radix-tree/main.c item_tag_set(&tree, start-1, 0);
start 190 tools/testing/radix-tree/main.c if (start <= end)
start 209 tools/testing/radix-tree/main.c if (idx[i] >= start && idx[i] <= end)
start 217 tools/testing/radix-tree/main.c tagged = tag_tagged_items(&tree, start, end, ITEMS, XA_MARK_0, XA_MARK_1);
start 221 tools/testing/radix-tree/main.c check_copied_tags(&tree, start, end, idx, ITEMS, 0, 1);
start 226 tools/testing/radix-tree/main.c tagged = tag_tagged_items(&tree, start, end, tmp, XA_MARK_0, XA_MARK_2);
start 231 tools/testing/radix-tree/main.c check_copied_tags(&tree, start, end, idx, ITEMS, 0, 2);
start 79 tools/testing/radix-tree/regression1.c static unsigned find_get_pages(unsigned long start,
start 82 tools/testing/radix-tree/regression1.c XA_STATE(xas, &mt_tree, start);
start 81 tools/testing/radix-tree/regression2.c unsigned long int start, end;
start 93 tools/testing/radix-tree/regression2.c start = 0;
start 95 tools/testing/radix-tree/regression2.c tag_tagged_items(&mt_tree, start, end, 1,
start 112 tools/testing/radix-tree/regression2.c start = 1;
start 114 tools/testing/radix-tree/regression2.c radix_tree_gang_lookup_tag_slot(&mt_tree, (void ***)pages, start, end,
start 117 tools/testing/radix-tree/test.c unsigned long start, unsigned long nr,
start 132 tools/testing/radix-tree/test.c start + into, nr_to_find);
start 135 tools/testing/radix-tree/test.c assert(items[i]->index == start + into + i);
start 143 tools/testing/radix-tree/test.c void item_full_scan(struct radix_tree_root *root, unsigned long start,
start 148 tools/testing/radix-tree/test.c unsigned long this_index = start;
start 166 tools/testing/radix-tree/test.c assert(this_index == start + nr);
start 173 tools/testing/radix-tree/test.c int tag_tagged_items(struct xarray *xa, unsigned long start, unsigned long end,
start 176 tools/testing/radix-tree/test.c XA_STATE(xas, xa, start);
start 24 tools/testing/radix-tree/test.h unsigned long start, unsigned long nr,
start 26 tools/testing/radix-tree/test.h void item_full_scan(struct radix_tree_root *root, unsigned long start,
start 30 tools/testing/radix-tree/test.h int tag_tagged_items(struct xarray *, unsigned long start, unsigned long end,
start 125 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
start 127 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
start 129 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
start 5 tools/testing/selftests/bpf/prog_tests/attach_probe.c size_t start, offset;
start 14 tools/testing/selftests/bpf/prog_tests/attach_probe.c &start, buf, &offset) == 3) {
start 17 tools/testing/selftests/bpf/prog_tests/attach_probe.c return start - offset;
start 119 tools/testing/selftests/bpf/progs/xdping_kern.c if (pinginfo->start) {
start 128 tools/testing/selftests/bpf/progs/xdping_kern.c pinginfo->start;
start 129 tools/testing/selftests/bpf/progs/xdping_kern.c pinginfo->start = 0;
start 148 tools/testing/selftests/bpf/progs/xdping_kern.c pinginfo->start = bpf_ktime_get_ns();
start 3511 tools/testing/selftests/bpf/test_btf.c static const char *get_next_str(const char *start, const char *end)
start 3513 tools/testing/selftests/bpf/test_btf.c return start < end - 1 ? start + 1 : NULL;
start 143 tools/testing/selftests/bpf/test_flow_dissector.c static unsigned long add_csum_hword(const uint16_t *start, int num_u16)
start 149 tools/testing/selftests/bpf/test_flow_dissector.c sum += start[i];
start 154 tools/testing/selftests/bpf/test_flow_dissector.c static uint16_t build_ip_csum(const uint16_t *start, int num_u16,
start 157 tools/testing/selftests/bpf/test_flow_dissector.c sum += add_csum_hword(start, num_u16);
start 364 tools/testing/selftests/bpf/test_progs.c int i, set_len = 0, num, start = 0, end = -1;
start 377 tools/testing/selftests/bpf/test_progs.c start = num;
start 395 tools/testing/selftests/bpf/test_progs.c if (start > end)
start 407 tools/testing/selftests/bpf/test_progs.c for (i = start; i <= end; i++) {
start 310 tools/testing/selftests/bpf/test_sockmap.c struct timespec start;
start 346 tools/testing/selftests/bpf/test_sockmap.c clock_gettime(CLOCK_MONOTONIC, &s->start);
start 465 tools/testing/selftests/bpf/test_sockmap.c clock_gettime(CLOCK_MONOTONIC, &s->start);
start 504 tools/testing/selftests/bpf/test_sockmap.c err = clock_gettime(CLOCK_MONOTONIC, &s->start);
start 594 tools/testing/selftests/bpf/test_sockmap.c return s.bytes_sent / (s.end.tv_sec - s.start.tv_sec);
start 599 tools/testing/selftests/bpf/test_sockmap.c return s.bytes_recvd / (s.end.tv_sec - s.start.tv_sec);
start 647 tools/testing/selftests/bpf/test_sockmap.c if (s.end.tv_sec - s.start.tv_sec) {
start 677 tools/testing/selftests/bpf/test_sockmap.c if (s.end.tv_sec - s.start.tv_sec) {
start 178 tools/testing/selftests/bpf/test_sockmap_kern.h int *start, *end, *start_push, *end_push, *start_pop, *pop;
start 186 tools/testing/selftests/bpf/test_sockmap_kern.h start = bpf_map_lookup_elem(&sock_bytes, &zero);
start 188 tools/testing/selftests/bpf/test_sockmap_kern.h if (start && end)
start 189 tools/testing/selftests/bpf/test_sockmap_kern.h bpf_msg_pull_data(msg, *start, *end, 0);
start 205 tools/testing/selftests/bpf/test_sockmap_kern.h int *start, *end, *start_push, *end_push, *start_pop, *pop;
start 216 tools/testing/selftests/bpf/test_sockmap_kern.h start = bpf_map_lookup_elem(&sock_bytes, &zero);
start 218 tools/testing/selftests/bpf/test_sockmap_kern.h if (start && end) {
start 222 tools/testing/selftests/bpf/test_sockmap_kern.h start ? *start : 0, end ? *end : 0);
start 223 tools/testing/selftests/bpf/test_sockmap_kern.h err = bpf_msg_pull_data(msg, *start, *end, 0);
start 271 tools/testing/selftests/bpf/test_sockmap_kern.h int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
start 281 tools/testing/selftests/bpf/test_sockmap_kern.h start = bpf_map_lookup_elem(&sock_bytes, &zero);
start 283 tools/testing/selftests/bpf/test_sockmap_kern.h if (start && end)
start 284 tools/testing/selftests/bpf/test_sockmap_kern.h bpf_msg_pull_data(msg, *start, *end, 0);
start 311 tools/testing/selftests/bpf/test_sockmap_kern.h int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
start 326 tools/testing/selftests/bpf/test_sockmap_kern.h start = bpf_map_lookup_elem(&sock_bytes, &zero);
start 328 tools/testing/selftests/bpf/test_sockmap_kern.h if (start && end) {
start 330 tools/testing/selftests/bpf/test_sockmap_kern.h start ? *start : 0, end ? *end : 0);
start 331 tools/testing/selftests/bpf/test_sockmap_kern.h err = bpf_msg_pull_data(msg, *start, *end, 0);
start 425 tools/testing/selftests/bpf/test_sockmap_kern.h int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
start 434 tools/testing/selftests/bpf/test_sockmap_kern.h start = bpf_map_lookup_elem(&sock_bytes, &zero);
start 436 tools/testing/selftests/bpf/test_sockmap_kern.h if (start && end)
start 437 tools/testing/selftests/bpf/test_sockmap_kern.h bpf_msg_pull_data(msg, *start, *end, 0);
start 50 tools/testing/selftests/bpf/trace_helpers.c int start = 0, end = sym_cnt;
start 57 tools/testing/selftests/bpf/trace_helpers.c while (start < end) {
start 58 tools/testing/selftests/bpf/trace_helpers.c size_t mid = start + (end - start) / 2;
start 64 tools/testing/selftests/bpf/trace_helpers.c start = mid + 1;
start 69 tools/testing/selftests/bpf/trace_helpers.c if (start >= 1 && syms[start - 1].addr < key &&
start 70 tools/testing/selftests/bpf/trace_helpers.c key < syms[start].addr)
start 72 tools/testing/selftests/bpf/trace_helpers.c return &syms[start - 1];
start 8 tools/testing/selftests/bpf/xdping.h __u64 start;
start 26 tools/testing/selftests/intel_pstate/aperf.c long long int start, finish, total;
start 76 tools/testing/selftests/intel_pstate/aperf.c start = before.time*1000 + before.millitm;
start 78 tools/testing/selftests/intel_pstate/aperf.c total = finish - start;
start 162 tools/testing/selftests/kvm/include/kvm_util.h kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
start 51 tools/testing/selftests/kvm/include/sparsebit.h sparsebit_idx_t start, sparsebit_num_t num);
start 53 tools/testing/selftests/kvm/include/sparsebit.h sparsebit_idx_t start, sparsebit_num_t num);
start 56 tools/testing/selftests/kvm/include/sparsebit.h void sparsebit_set_num(struct sparsebit *sbitp, sparsebit_idx_t start,
start 62 tools/testing/selftests/kvm/include/sparsebit.h sparsebit_idx_t start, sparsebit_num_t num);
start 27 tools/testing/selftests/kvm/lib/aarch64/ucall.c vm_paddr_t gpa, start, end, step, offset;
start 57 tools/testing/selftests/kvm/lib/aarch64/ucall.c start = end * 5 / 8;
start 59 tools/testing/selftests/kvm/lib/aarch64/ucall.c for (offset = 0; offset < end - start; offset += step) {
start 60 tools/testing/selftests/kvm/lib/aarch64/ucall.c if (ucall_mmio_init(vm, start - offset))
start 62 tools/testing/selftests/kvm/lib/aarch64/ucall.c if (ucall_mmio_init(vm, start + offset))
start 320 tools/testing/selftests/kvm/lib/kvm_util.c userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
start 329 tools/testing/selftests/kvm/lib/kvm_util.c if (start <= existing_end && end >= existing_start)
start 353 tools/testing/selftests/kvm/lib/kvm_util.c kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
start 358 tools/testing/selftests/kvm/lib/kvm_util.c region = userspace_mem_region_find(vm, start, end);
start 901 tools/testing/selftests/kvm/lib/sparsebit.c static inline sparsebit_idx_t node_first_set(struct node *nodep, int start)
start 903 tools/testing/selftests/kvm/lib/sparsebit.c mask_t leading = (mask_t)1 << start;
start 909 tools/testing/selftests/kvm/lib/sparsebit.c static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start)
start 911 tools/testing/selftests/kvm/lib/sparsebit.c mask_t leading = (mask_t)1 << start;
start 1159 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_idx_t start;
start 1218 tools/testing/selftests/kvm/lib/sparsebit.c start = lowest_possible - candidate->idx;
start 1220 tools/testing/selftests/kvm/lib/sparsebit.c if (start < MASK_BITS && candidate->mask >= (1 << start))
start 1221 tools/testing/selftests/kvm/lib/sparsebit.c return node_first_set(candidate, start);
start 1305 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_idx_t start, sparsebit_num_t num)
start 1311 tools/testing/selftests/kvm/lib/sparsebit.c for (idx = sparsebit_next_set(s, start);
start 1340 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_idx_t start, sparsebit_num_t num)
start 1346 tools/testing/selftests/kvm/lib/sparsebit.c for (idx = sparsebit_next_clear(s, start);
start 1372 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_idx_t start, sparsebit_num_t num)
start 1381 tools/testing/selftests/kvm/lib/sparsebit.c assert(start + num - 1 >= start);
start 1403 tools/testing/selftests/kvm/lib/sparsebit.c for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
start 1454 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_idx_t start, sparsebit_num_t num)
start 1463 tools/testing/selftests/kvm/lib/sparsebit.c assert(start + num - 1 >= start);
start 1466 tools/testing/selftests/kvm/lib/sparsebit.c for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
start 348 tools/testing/selftests/mqueue/mq_perf_tests.c clock_gettime(clock, &start); \
start 355 tools/testing/selftests/mqueue/mq_perf_tests.c nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
start 356 tools/testing/selftests/mqueue/mq_perf_tests.c (middle.tv_nsec - start.tv_nsec); \
start 429 tools/testing/selftests/mqueue/mq_perf_tests.c struct timespec res, start, middle, end, send_total, recv_total;
start 482 tools/testing/selftests/mqueue/mq_perf_tests.c clock_gettime(clock, &start);
start 488 tools/testing/selftests/mqueue/mq_perf_tests.c nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
start 489 tools/testing/selftests/mqueue/mq_perf_tests.c 1000000000) + (end.tv_nsec - start.tv_nsec);
start 511 tools/testing/selftests/mqueue/mq_perf_tests.c clock_gettime(clock, &start);
start 514 tools/testing/selftests/mqueue/mq_perf_tests.c nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
start 515 tools/testing/selftests/mqueue/mq_perf_tests.c 1000000000) + (end.tv_nsec - start.tv_nsec);
start 107 tools/testing/selftests/net/msg_zerocopy.c static uint16_t get_ip_csum(const uint16_t *start, int num_words)
start 113 tools/testing/selftests/net/msg_zerocopy.c sum += start[i];
start 54 tools/testing/selftests/net/psock_snd.c static unsigned long add_csum_hword(const uint16_t *start, int num_u16)
start 60 tools/testing/selftests/net/psock_snd.c sum += start[i];
start 65 tools/testing/selftests/net/psock_snd.c static uint16_t build_ip_csum(const uint16_t *start, int num_u16,
start 68 tools/testing/selftests/net/psock_snd.c sum += add_csum_hword(start, num_u16);
start 306 tools/testing/selftests/networking/timestamping/txtimestamp.c static uint16_t get_ip_csum(const uint16_t *start, int num_words,
start 312 tools/testing/selftests/networking/timestamping/txtimestamp.c sum += start[i];
start 136 tools/testing/selftests/powerpc/mm/tlbie_test.c static inline unsigned int compute_word_offset(char *start, unsigned int *addr)
start 139 tools/testing/selftests/powerpc/mm/tlbie_test.c delta_bytes = (unsigned long)addr - (unsigned long)start;
start 207 tools/testing/selftests/powerpc/mm/tlbie_test.c char *start = compute_chunk_start_addr(tid);
start 208 tools/testing/selftests/powerpc/mm/tlbie_test.c unsigned int word_offset = compute_word_offset(start, addr);
start 154 tools/testing/selftests/powerpc/pmu/lib.c unsigned long start, end;
start 168 tools/testing/selftests/powerpc/pmu/lib.c &start, &end, &execute, name);
start 176 tools/testing/selftests/powerpc/pmu/lib.c libc.first = start;
start 179 tools/testing/selftests/powerpc/pmu/lib.c vdso.first = start;
start 28 tools/testing/selftests/proc/proc-uptime-001.c uint64_t start, u0, u1, i0, i1;
start 35 tools/testing/selftests/proc/proc-uptime-001.c start = u0;
start 42 tools/testing/selftests/proc/proc-uptime-001.c } while (u1 - start < 100);
start 399 tools/testing/selftests/ptp/testptp.c perout_request.start.sec = ts.tv_sec + 2;
start 400 tools/testing/selftests/ptp/testptp.c perout_request.start.nsec = 0;
start 23 tools/testing/selftests/seccomp/seccomp_benchmark.c struct timespec start, finish;
start 26 tools/testing/selftests/seccomp/seccomp_benchmark.c assert(clock_gettime(clk_id, &start) == 0);
start 33 tools/testing/selftests/seccomp/seccomp_benchmark.c i = finish.tv_sec - start.tv_sec;
start 35 tools/testing/selftests/seccomp/seccomp_benchmark.c i += finish.tv_nsec - start.tv_nsec;
start 39 tools/testing/selftests/seccomp/seccomp_benchmark.c start.tv_sec, start.tv_nsec,
start 187 tools/testing/selftests/sparc64/drivers/adi-test.c long start, end, elapsed_time = 0;
start 190 tools/testing/selftests/sparc64/drivers/adi-test.c RDTICK(start);
start 196 tools/testing/selftests/sparc64/drivers/adi-test.c elapsed_time += end - start;
start 213 tools/testing/selftests/sparc64/drivers/adi-test.c long start, end, elapsed_time = 0;
start 217 tools/testing/selftests/sparc64/drivers/adi-test.c RDTICK(start);
start 224 tools/testing/selftests/sparc64/drivers/adi-test.c elapsed_time += end - start;
start 243 tools/testing/selftests/sparc64/drivers/adi-test.c long start, end, elapsed_time = 0;
start 246 tools/testing/selftests/sparc64/drivers/adi-test.c RDTICK(start);
start 252 tools/testing/selftests/sparc64/drivers/adi-test.c elapsed_time += (end - start);
start 268 tools/testing/selftests/sparc64/drivers/adi-test.c long start, end, elapsed_time = 0;
start 273 tools/testing/selftests/sparc64/drivers/adi-test.c RDTICK(start);
start 283 tools/testing/selftests/sparc64/drivers/adi-test.c elapsed_time += (end - start);
start 299 tools/testing/selftests/sparc64/drivers/adi-test.c long start, end;
start 302 tools/testing/selftests/sparc64/drivers/adi-test.c RDTICK(start);
start 309 tools/testing/selftests/sparc64/drivers/adi-test.c DEBUG_PRINT_T("\tlseek elapsed timed = %ld\n", end - start);
start 310 tools/testing/selftests/sparc64/drivers/adi-test.c update_stats(&seek_stats, end - start, 0);
start 59 tools/testing/selftests/timers/adjtick.c long long diff_timespec(struct timespec start, struct timespec end)
start 63 tools/testing/selftests/timers/adjtick.c start_ns = ts_to_nsec(start);
start 71 tools/testing/selftests/timers/adjtick.c struct timespec start, mid, end;
start 82 tools/testing/selftests/timers/adjtick.c clock_gettime(CLOCK_MONOTONIC, &start);
start 86 tools/testing/selftests/timers/adjtick.c newdiff = diff_timespec(start, end);
start 90 tools/testing/selftests/timers/adjtick.c tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
start 63 tools/testing/selftests/timers/mqueue-lat.c struct timespec start, end, now, target;
start 75 tools/testing/selftests/timers/mqueue-lat.c clock_gettime(CLOCK_MONOTONIC, &start);
start 94 tools/testing/selftests/timers/mqueue-lat.c if ((timespec_sub(start, end)/count) > TARGET_TIMEOUT + UNRESONABLE_LATENCY)
start 100 tools/testing/selftests/timers/nsleep-lat.c struct timespec start, end, target;
start 107 tools/testing/selftests/timers/nsleep-lat.c if (clock_gettime(clockid, &start))
start 115 tools/testing/selftests/timers/nsleep-lat.c clock_gettime(clockid, &start);
start 120 tools/testing/selftests/timers/nsleep-lat.c if (((timespec_sub(start, end)/count)-ns) > UNRESONABLE_LATENCY) {
start 121 tools/testing/selftests/timers/nsleep-lat.c printf("Large rel latency: %lld ns :", (timespec_sub(start, end)/count)-ns);
start 127 tools/testing/selftests/timers/nsleep-lat.c clock_gettime(clockid, &start);
start 128 tools/testing/selftests/timers/nsleep-lat.c target = timespec_add(start, ns);
start 62 tools/testing/selftests/timers/posix_timers.c static int check_diff(struct timeval start, struct timeval end)
start 66 tools/testing/selftests/timers/posix_timers.c diff = end.tv_usec - start.tv_usec;
start 67 tools/testing/selftests/timers/posix_timers.c diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
start 80 tools/testing/selftests/timers/posix_timers.c struct timeval start, end;
start 105 tools/testing/selftests/timers/posix_timers.c err = gettimeofday(&start, NULL);
start 130 tools/testing/selftests/timers/posix_timers.c if (!check_diff(start, end))
start 142 tools/testing/selftests/timers/posix_timers.c struct timeval start, end;
start 163 tools/testing/selftests/timers/posix_timers.c err = gettimeofday(&start, NULL);
start 183 tools/testing/selftests/timers/posix_timers.c if (!check_diff(start, end))
start 60 tools/testing/selftests/timers/raw_skew.c long long diff_timespec(struct timespec start, struct timespec end)
start 64 tools/testing/selftests/timers/raw_skew.c start_ns = ts_to_nsec(start);
start 71 tools/testing/selftests/timers/raw_skew.c struct timespec start, mid, end;
start 78 tools/testing/selftests/timers/raw_skew.c clock_gettime(CLOCK_MONOTONIC, &start);
start 82 tools/testing/selftests/timers/raw_skew.c newdiff = diff_timespec(start, end);
start 86 tools/testing/selftests/timers/raw_skew.c tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
start 94 tools/testing/selftests/timers/raw_skew.c struct timespec mon, raw, start, end;
start 108 tools/testing/selftests/timers/raw_skew.c start = mon;
start 124 tools/testing/selftests/timers/raw_skew.c interval = diff_timespec(start, end);
start 33 tools/testing/selftests/timers/rtcpie.c struct timeval start, end, diff;
start 95 tools/testing/selftests/timers/rtcpie.c gettimeofday(&start, NULL);
start 103 tools/testing/selftests/timers/rtcpie.c timersub(&end, &start, &diff);
start 78 tools/testing/selftests/timers/set-2038.c time_t start;
start 88 tools/testing/selftests/timers/set-2038.c start = time(0);
start 129 tools/testing/selftests/timers/set-2038.c settime(start);
start 123 tools/testing/selftests/timers/threadtest.c time_t start, now, runtime;
start 162 tools/testing/selftests/timers/threadtest.c start = time(0);
start 163 tools/testing/selftests/timers/threadtest.c strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
start 172 tools/testing/selftests/timers/threadtest.c while (time(&now) < start + runtime) {
start 15 tools/testing/selftests/vm/mlock2-tests.c unsigned long start;
start 26 tools/testing/selftests/vm/mlock2-tests.c unsigned long start;
start 55 tools/testing/selftests/vm/mlock2-tests.c sscanf(line, "%lx", &start);
start 58 tools/testing/selftests/vm/mlock2-tests.c if (start <= addr && end > addr) {
start 59 tools/testing/selftests/vm/mlock2-tests.c area->start = start;
start 447 tools/testing/selftests/vm/mlock2-tests.c if (page1.start != page2.start || page2.start != page3.start) {
start 466 tools/testing/selftests/vm/mlock2-tests.c if (page1.start == page2.start || page2.start == page3.start) {
start 485 tools/testing/selftests/vm/mlock2-tests.c if (page1.start != page2.start || page2.start != page3.start) {
start 15 tools/testing/selftests/vm/mlock2.h static int mlock2_(void *start, size_t len, int flags)
start 18 tools/testing/selftests/vm/mlock2.h return syscall(__NR_mlock2, start, len, flags);
start 30 tools/testing/selftests/vm/mlock2.h unsigned long start, end;
start 45 tools/testing/selftests/vm/mlock2.h &start, &end, perms, &offset, dev, &inode, path) < 6)
start 48 tools/testing/selftests/vm/mlock2.h if (start <= addr && addr < end)
start 148 tools/testing/selftests/vm/userfaultfd.c static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
start 205 tools/testing/selftests/vm/userfaultfd.c static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)
start 215 tools/testing/selftests/vm/userfaultfd.c *start = (unsigned long) area_dst_alias + offset;
start 245 tools/testing/selftests/vm/userfaultfd.c void (*alias_mapping)(__u64 *start, size_t len, unsigned long offset);
start 293 tools/testing/selftests/vm/userfaultfd.c time_t start;
start 324 tools/testing/selftests/vm/userfaultfd.c start = time(NULL);
start 385 tools/testing/selftests/vm/userfaultfd.c if (time(NULL) - start > 1)
start 389 tools/testing/selftests/vm/userfaultfd.c time(NULL) - start);
start 535 tools/testing/selftests/vm/userfaultfd.c uffd_reg.range.start = msg.arg.remove.start;
start 537 tools/testing/selftests/vm/userfaultfd.c msg.arg.remove.start;
start 801 tools/testing/selftests/vm/userfaultfd.c uffd_test_ops->alias_mapping(&uffdio_zeropage->range.start,
start 825 tools/testing/selftests/vm/userfaultfd.c uffdio_zeropage.range.start = (unsigned long) area_dst + offset;
start 884 tools/testing/selftests/vm/userfaultfd.c uffdio_register.range.start = (unsigned long) area_dst;
start 929 tools/testing/selftests/vm/userfaultfd.c uffdio_register.range.start = (unsigned long) area_dst;
start 988 tools/testing/selftests/vm/userfaultfd.c uffdio_register.range.start = (unsigned long) area_dst;
start 1119 tools/testing/selftests/vm/userfaultfd.c uffdio_register.range.start = (unsigned long) area_dst;
start 1135 tools/testing/selftests/vm/userfaultfd.c uffdio_register.range.start = (unsigned long)
start 1180 tools/testing/selftests/vm/userfaultfd.c uffdio_register.range.start = (unsigned long) area_dst;
start 61 tools/testing/selftests/x86/test_vdso.c void *start, *end;
start 66 tools/testing/selftests/x86/test_vdso.c &start, &end, &r, &x, name) != 5)
start 219 tools/testing/selftests/x86/test_vdso.c struct timespec start, vdso, end;
start 224 tools/testing/selftests/x86/test_vdso.c if (sys_clock_gettime(clock, &start) < 0) {
start 250 tools/testing/selftests/x86/test_vdso.c (unsigned long long)start.tv_sec, start.tv_nsec,
start 254 tools/testing/selftests/x86/test_vdso.c if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
start 275 tools/testing/selftests/x86/test_vdso.c struct timeval start, vdso, end;
start 284 tools/testing/selftests/x86/test_vdso.c if (sys_gettimeofday(&start, &sys_tz) < 0) {
start 301 tools/testing/selftests/x86/test_vdso.c (unsigned long long)start.tv_sec, start.tv_usec,
start 305 tools/testing/selftests/x86/test_vdso.c if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
start 117 tools/testing/selftests/x86/test_vsyscall.c void *start, *end;
start 122 tools/testing/selftests/x86/test_vsyscall.c &start, &end, &r, &x, name) != 5)
start 130 tools/testing/selftests/x86/test_vsyscall.c if (start != (void *)0xffffffffff600000 ||
start 49 tools/thermal/tmon/tui.c static void draw_hbar(WINDOW *win, int y, int start, int len,
start 524 tools/thermal/tmon/tui.c static void draw_hbar(WINDOW *win, int y, int start, int len, unsigned long ptn,
start 527 tools/thermal/tmon/tui.c mvwaddch(win, y, start, ptn);
start 193 tools/usb/usbip/libsrc/vhci_driver.c char *buffer, *start, *end;
start 220 tools/usb/usbip/libsrc/vhci_driver.c start = buffer;
start 222 tools/usb/usbip/libsrc/vhci_driver.c end = strchr(start, delim[part]);
start 223 tools/usb/usbip/libsrc/vhci_driver.c if (end == NULL || (end - start) > max_len[part]) {
start 227 tools/usb/usbip/libsrc/vhci_driver.c start = end + 1;
start 44 tools/virtio/vringh_test.c r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
start 58 tools/virtio/vringh_test.c r->start = addr;
start 59 tools/virtio/vringh_test.c r->end_incl = r->start;
start 755 tools/vm/page-types.c unsigned long start;
start 766 tools/vm/page-types.c start = max_t(unsigned long, pg_start[i], index);
start 769 tools/vm/page-types.c assert(start < index);
start 770 tools/vm/page-types.c walk_vma(start, index - start);
start 386 usr/gen_init_cpio.c char *start, *end, *var;
start 388 usr/gen_init_cpio.c while ((start = strstr(new_location, "${")) &&
start 389 usr/gen_init_cpio.c (end = strchr(start + 2, '}'))) {
start 390 usr/gen_init_cpio.c *start = *end = 0;
start 391 usr/gen_init_cpio.c var = getenv(start + 2);
start 335 virt/kvm/arm/mmu.c static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
start 338 virt/kvm/arm/mmu.c phys_addr_t addr = start, end = start + size;
start 535 virt/kvm/arm/mmu.c phys_addr_t start, u64 size)
start 538 virt/kvm/arm/mmu.c phys_addr_t addr = start, end = start + size;
start 553 virt/kvm/arm/mmu.c static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
start 555 virt/kvm/arm/mmu.c __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
start 558 virt/kvm/arm/mmu.c static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
start 560 virt/kvm/arm/mmu.c __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
start 610 virt/kvm/arm/mmu.c static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
start 617 virt/kvm/arm/mmu.c addr = start;
start 626 virt/kvm/arm/mmu.c static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
start 634 virt/kvm/arm/mmu.c addr = start;
start 659 virt/kvm/arm/mmu.c static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
start 668 virt/kvm/arm/mmu.c addr = start;
start 693 virt/kvm/arm/mmu.c unsigned long start, unsigned long end,
start 702 virt/kvm/arm/mmu.c addr = start & PAGE_MASK;
start 754 virt/kvm/arm/mmu.c unsigned long start = kern_hyp_va((unsigned long)from);
start 760 virt/kvm/arm/mmu.c start = start & PAGE_MASK;
start 763 virt/kvm/arm/mmu.c for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
start 766 virt/kvm/arm/mmu.c phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
start 1544 virt/kvm/arm/mmu.c phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
start 1548 virt/kvm/arm/mmu.c stage2_wp_range(kvm, start, end);
start 1569 virt/kvm/arm/mmu.c phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
start 1572 virt/kvm/arm/mmu.c stage2_wp_range(kvm, start, end);
start 2014 virt/kvm/arm/mmu.c unsigned long start,
start 2032 virt/kvm/arm/mmu.c hva_start = max(start, memslot->userspace_addr);
start 2052 virt/kvm/arm/mmu.c unsigned long start, unsigned long end)
start 2057 virt/kvm/arm/mmu.c trace_kvm_unmap_hva_range(start, end);
start 2058 virt/kvm/arm/mmu.c handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
start 2137 virt/kvm/arm/mmu.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
start 2141 virt/kvm/arm/mmu.c trace_kvm_age_hva(start, end);
start 2142 virt/kvm/arm/mmu.c return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
start 473 virt/kvm/arm/pmu.c cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
start 139 virt/kvm/arm/trace.h TP_PROTO(unsigned long start, unsigned long end),
start 140 virt/kvm/arm/trace.h TP_ARGS(start, end),
start 143 virt/kvm/arm/trace.h __field( unsigned long, start )
start 148 virt/kvm/arm/trace.h __entry->start = start;
start 153 virt/kvm/arm/trace.h __entry->start, __entry->end)
start 172 virt/kvm/arm/trace.h TP_PROTO(unsigned long start, unsigned long end),
start 173 virt/kvm/arm/trace.h TP_ARGS(start, end),
start 176 virt/kvm/arm/trace.h __field( unsigned long, start )
start 181 virt/kvm/arm/trace.h __entry->start = start;
start 186 virt/kvm/arm/trace.h __entry->start, __entry->end)
start 252 virt/kvm/arm/vgic/vgic-debug.c .start = vgic_debug_start,
start 371 virt/kvm/arm/vgic/vgic-v2.c if (!info->vctrl.start) {
start 376 virt/kvm/arm/vgic/vgic-v2.c if (!PAGE_ALIGNED(info->vcpu.start) ||
start 380 virt/kvm/arm/vgic/vgic-v2.c ret = create_hyp_io_mappings(info->vcpu.start,
start 392 virt/kvm/arm/vgic/vgic-v2.c ret = create_hyp_io_mappings(info->vctrl.start,
start 411 virt/kvm/arm/vgic/vgic-v2.c kvm_vgic_global_state.vcpu_base = info->vcpu.start;
start 415 virt/kvm/arm/vgic/vgic-v2.c kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
start 602 virt/kvm/arm/vgic/vgic-v3.c if (!info->vcpu.start) {
start 605 virt/kvm/arm/vgic/vgic-v3.c } else if (!PAGE_ALIGNED(info->vcpu.start)) {
start 607 virt/kvm/arm/vgic/vgic-v3.c (unsigned long long)info->vcpu.start);
start 610 virt/kvm/arm/vgic/vgic-v3.c kvm_vgic_global_state.vcpu_base = info->vcpu.start;
start 617 virt/kvm/arm/vgic/vgic-v3.c kvm_info("vgic-v2@%llx\n", info->vcpu.start);
start 412 virt/kvm/eventfd.c irqfd->consumer.start = kvm_arch_irq_bypass_start;
start 161 virt/kvm/kvm_main.c unsigned long start, unsigned long end)
start 385 virt/kvm/kvm_main.c unsigned long start, unsigned long end)
start 391 virt/kvm/kvm_main.c kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
start 428 virt/kvm/kvm_main.c need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
start 466 virt/kvm/kvm_main.c unsigned long start,
start 475 virt/kvm/kvm_main.c young = kvm_age_hva(kvm, start, end);
start 487 virt/kvm/kvm_main.c unsigned long start,
start 508 virt/kvm/kvm_main.c young = kvm_age_hva(kvm, start, end);
start 2471 virt/kvm/kvm_main.c ktime_t start, cur;
start 2478 virt/kvm/kvm_main.c start = cur = ktime_get();
start 2512 virt/kvm/kvm_main.c block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
start 49 virt/lib/irqbypass.c if (cons->start)
start 50 virt/lib/irqbypass.c cons->start(cons);
start 51 virt/lib/irqbypass.c if (prod->start)
start 52 virt/lib/irqbypass.c prod->start(prod);
start 71 virt/lib/irqbypass.c if (cons->start)
start 72 virt/lib/irqbypass.c cons->start(cons);
start 73 virt/lib/irqbypass.c if (prod->start)
start 74 virt/lib/irqbypass.c prod->start(prod);